# ===== File: project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/model.py =====
#!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF with trainable sinc filter
version: 9
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over BLSTM
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
    Recurrence is computed along "length"
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
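# A minimal usage sketch (illustrative; the dimensions below are assumed,
# not taken from the original code): map (1, length, 80) features to
# (1, length, 64).
#   blstm = BLSTMLayer(80, 64)
#   out = blstm(torch.randn(1, 100, 80))   # out.shape == (1, 100, 64)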
#
# 1D dilated convolution that keeps the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
        # add one dimension (batchsize=1, dim, ADDED_DIM, length)
        # pad along the last (length) dimension
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
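# Padding arithmetic sketch (illustrative, with assumed sizes): for
# kernel_s=3 and dilation_s=2 the kernel spans dilation_s*(kernel_s-1) = 4
# extra samples. Causal mode pads (4, 0); non-causal mode pads (2, 2).
# Either way the output length equals the input length:
#   conv = Conv1dKeepLength(16, 16, dilation_s=2, kernel_s=3, causal=True)
#   out = conv(torch.randn(1, 100, 16))    # out.shape == (1, 100, 16)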
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
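# Sanity-check sketch (illustrative): the grouped conv with constant
# weights 1/window_len averages each feature dimension independently, so
# with pad_mode='replicate' a constant input passes through unchanged:
#   ma = MovingAverage(2, 5)
#   out = ma(torch.ones(1, 20, 2))         # out is still all ones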
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
    causal: whether the FIR is causal (default: True)
    flag_train: whether to train the filter coefficients (default: False)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
    For batch 0:
      For n in [0, signal_length):
        output(0, n, 0) = \sum_{k=0}^{K-1} signal(0, n-k, 0) * coef(0, n, k)
    Note: filter coef (0, n, :) is only used to compute the output
          at (0, n, 0)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
        For n in [0, signal_length):
           output(0, n, 0) = \sum_{k=0}^{K-1} signal(0, n-k, 0)*coef(0, n, k)
        This method may not be efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
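# Worked example (illustrative): when the coefficients are constant over
# time, TimeVarFIRFilter reduces to an ordinary causal FIR filter
# y[n] = sum_k coef[k] * x[n-k], here a 3-tap moving average:
#   tv_filter = TimeVarFIRFilter()
#   x = torch.randn(1, 100, 1)
#   coef = torch.ones(1, 100, 3) / 3.0
#   y = tv_filter(x, coef)                 # y.shape == (1, 100, 1)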
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
        import scipy
        import scipy.signal
        import numpy as np
        import matplotlib.pyplot as plt
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
        #  [-(M-1)/2, ... 0, (M-1)/2]
        #
        self.half_k = (filter_order - 1) // 2
        self.order = self.half_k * 2 + 1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order]
x[:, :, self.half_order] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
            # temporary buffer of [(-1)^n] for gain normalization of hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
            # normalize the coefs so that the gain at 0 (low-pass) or pi (high-pass) is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
            # sum_n hp_coef[n] * (-1)^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
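# Normalization check sketch (illustrative): after forward(), each
# low-pass filter sums to 1 (0 dB at omega=0) and each high-pass filter
# has unit alternating sum (0 dB at omega=pi):
#   sinc_layer = SincFilter(31)
#   lp, hp = sinc_layer(torch.ones(1, 4, 1) * 0.2)
#   lp[0, 0, :].sum()                                # ~= 1.0
#   sign = torch.pow(-1.0, torch.arange(-15, 16).float())
#   (hp[0, 0, :] * sign).sum()                       # ~= 1.0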
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
    Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
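# Usage sketch (illustrative; the dimensions are assumed): up-sample
# frame-level features by a factor of 80 to the waveform rate, with
# moving-average smoothing:
#   upsamp = UpSampleLayer(64, 80, smoothing=True)
#   out = upsamp(torch.randn(1, 100, 64))  # out.shape == (1, 8000, 64)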
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
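# Usage sketch (illustrative): one filter block transforms a 1-d signal,
# conditioned on hidden features of size hidden_size:
#   nfb = NeuralFilterBlock(signal_size=1, hidden_size=64)
#   sig = torch.randn(1, 8000, 1)
#   ctx = torch.randn(1, 8000, 64)
#   out = nfb(sig, ctx)                    # out.shape == (1, 8000, 1)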
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase: sine[t] = sin(2*pi * \sum_{i=1}^{t} rad_i)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
            # This will not change the F0 of the sine because
            # sin((x-1) * 2*pi) = sin(x * 2*pi)
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
            # If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: std for unvoiced regions should be similar to sine_amp
            #   std = self.sine_amp/3 -> max value ~ self.sine_amp
            #   std for voiced regions is self.noise_std
            noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
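# Usage sketch (illustrative): generate a 100 Hz tone plus 7 overtones at
# 16 kHz; steps with f0 == 0 are treated as unvoiced and filled with weak
# Gaussian noise:
#   sine_gen = SineGen(16000, harmonic_num=7)
#   f0 = torch.ones(1, 16000, 1) * 100.0
#   sines, uv, noise = sine_gen(f0)        # sines.shape == (1, 16000, 8)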
#####
## Model definition
##
## The condition module only provides spectral features to the filter blocks
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
    Spec, F0, cut_off_freq, hidden_cut_f = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
        # the CNN layer (+1 dim for cut_off_frequency of sinc filter)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
self.output_dim, \
dilation_s = 1, \
kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim, \
self.up_sample, True)
# separate layer for up-sampling normalized F0 values
self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
        # hidden_feat is in (-1, 1) after conv1d with tanh
        # hidden_feat * 0.2 + 0.3 -> (-0.2, 0.2) + 0.3 = (0.1, 0.5)
        # voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9)
        # unvoiced: (0.1, 0.5)
        return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
        # concatenate normed F0 with hidden spectral features
context = torch.cat((tmp[:, :, 0:self.output_dim-1], \
self.l_upsamp_f0_hi(feature[:, :, -1:])), \
dim=2)
# hidden feature for cut-off frequency
hidden_cut_f = tmp[:, :, self.output_dim-1:]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return context, f0_upsamp, cut_f_smoothed, hidden_cut_f
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshold=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source, uv = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
uv (batchsize, length, 1)
"""
    def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshold=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshold)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
        Sine_source, noise_source, uv = forward(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
        uv (batchsize, length, 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
# For Filter module
class FilterModuleHnSincNSF(torch_nn.Module):
""" Filter for Hn-sinc-NSF
FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
    Usage:
    output = FilterModuleHnSincNSF(har_source, noi_source, context, cut_f)
    har_source: source for harmonic branch (batchsize, length, dim=1)
    noi_source: source for noise branch (batchsize, length, dim=1)
    context: hidden features to be added (batchsize, length, dim)
    cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
    output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnSincNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, cond_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtones in source
self.harmonic_num = 7
# order of sinc-windowed-FIR-filter
self.sinc_order = 31
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnSincNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
        # assume x[:, :, -1] is F0; keep the raw (un-normalized) F0
f0 = x[:, :, -1:]
# normalize the input features data
feat = self.normalize_input(x)
# condition module
# feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc,
# hidden-feature-for-cut-off-f
cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv
har_source, noi_source, uv = self.m_source(f0_upsamped)
# neural filter module (including sinc-based FIR filtering)
# output
output = self.m_filter(har_source, noi_source, cond_feat, cut_f)
if self.training:
            # just in case we need to penalize the hidden feature for
            # cut-off-freq.
return [output.squeeze(-1), hid_cut_f]
else:
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# fft length
self.fft_n = [512, 128, 2048]
# window type in stft
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# loss function
self.loss = torch_nn.MSELoss()
# weight to penalize hidden features for cut-off-frequency
# for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0
self.cutoff_w = 0.0
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
# to be compatible with different torch versions
        ver = torch.__version__.split('.')
        if ver[0].isnumeric() and ver[1].isnumeric() and \
           int(ver[0]) == 1 and int(ver[1]) < 7:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
output[x, y, z] = log(x_stft[x, y, z, 1]^2 + x_stft[x, y, z, 2]^2
+ floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, outputs, target):
""" Loss().compute(outputs, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# hidden-feature for cut-off-frequency
cut_f = outputs[1]
# generated signal
output = outputs[0]
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output, fft_p, frame_shift, frame_len)
y_stft = self._stft(target, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
# A norm on cut_f, which forces sinc-cut-off-frequency
# to be close to the U/V-decided value
# Experiments on CMU-arctic, ATR-F009, and VCTK don't use it
# by setting self.cutoff_w = 0.0
# However, just in case
loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f))
return loss
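# Usage sketch (illustrative; the variable names model, x, and
# target_waveform are assumed): in training mode Model.forward above
# returns [output, hid_cut_f], which matches what compute() expects:
#   loss_fn = Loss(None)                   # args is unused in __init__
#   loss_value = loss_fn.compute(model(x), target_waveform)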
if __name__ == "__main__":
print("Definition of model")
# ===== File: project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/config.py =====
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of files for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features, which are []
test_output_dirs = [[]]
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_main.py =====
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python 01_main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
    # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/03_fuse_score_evaluate.py =====
#!/usr/bin/python
"""
Wrapper to fuse score and compute EER and min tDCF
Simple score averaging.
Usage:
python 03_fuse_score_evaluate.py log_output_testset_1 log_output_testset_2 ...
The log_output_testset is produced by the pytorch code, for
example, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset
It has information like:
...
Generating 71230,LA_E_9999427,0,43237,0, time: 0.005s
Output, LA_E_9999487, 0, 0.172325
...
(See README for the format of this log)
This script will extract the line starts with "Output, ..."
"""
import os
import sys
import numpy as np
from sandbox import eval_asvspoof
def parse_txt(file_path):
bonafide = []
bonafide_file_name = []
spoofed = []
spoofed_file_name = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if line.startswith('Output,'):
#Output, LA_E_9999487, 0, 0.172325
temp = line.split(',')
flag = int(temp[2])
name = temp[1]
if flag:
bonafide_file_name.append(name)
bonafide.append(float(temp[-1]))
else:
spoofed.append(float(temp[-1]))
spoofed_file_name.append(name)
bonafide = np.array(bonafide)
spoofed = np.array(spoofed)
return bonafide, spoofed, bonafide_file_name, spoofed_file_name
def fuse_score(file_path_lists):
bonafide_score = {}
spoofed_score = {}
for data_path in file_path_lists:
bonafide, spoofed, bona_name, spoof_name = parse_txt(data_path)
for score, name in zip(bonafide, bona_name):
if name in bonafide_score:
bonafide_score[name].append(score)
else:
bonafide_score[name] = [score]
for score, name in zip(spoofed, spoof_name):
if name in spoofed_score:
spoofed_score[name].append(score)
else:
spoofed_score[name] = [score]
fused_bonafide = np.array([np.mean(y) for x, y in bonafide_score.items()])
fused_spoofed = np.array([np.mean(y) for x, y in spoofed_score.items()])
return fused_bonafide, fused_spoofed
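# Usage sketch (illustrative; the log file names are assumed): average the
# per-trial scores from two systems, then evaluate the fused scores:
#   bona, spoof = fuse_score(['sys1/log_output_testset',
#                             'sys2/log_output_testset'])
#   mintDCF, eer, thr = eval_asvspoof.tDCF_wrapper(bona, spoof)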
if __name__ == "__main__":
data_paths = sys.argv[1:]
bonafide, spoofed = fuse_score(data_paths)
mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)
print("Score file: {:s}".format(str(data_paths)))
print("mintDCF: {:1.4f}".format(mintDCF))
print("EER: {:2.3f}%".format(eer * 100))
print("Threshold: {:f}".format(threshold))
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_config.py =====
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets (any string you wish to use)
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
# we will use resources in this directory
tmp = os.path.dirname(__file__) + '/../../DATA/asvspoof2019_LA'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# If you have waveform in *.flac, please use input_exts = ['.flac']
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = [tmp + '/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [tmp + '/eval']
# Directories for output features, which are []
test_output_dirs = []
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_config_rawnet.py =====
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets (any string you wish to use)
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
# we will use resources in this directory
tmp = os.path.dirname(__file__) + '/../../DATA/asvspoof2019_LA'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# If you have waveform in *.flac, please use input_exts = ['.flac']
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64600
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 16000
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = [tmp + '/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [tmp + '/eval']
# Directories for output features, which are []
test_output_dirs = []
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/02_evaluate.py =====
#!/usr/bin/python
"""
Wrapper to parse the score file and compute EER and min tDCF
Usage:
 python 02_evaluate.py log_output_testset
The log_output_testset is produced by the pytorch code, for
example, ./lfcc-lcnn-lstmsum-am/01/__pretrained/log_output_testset
It has information like:
...
Generating 71230,LA_E_9999427,0,43237,0, time: 0.005s
Output, LA_E_9999487, 0, 0.172325
...
(See README for the format of this log)
This script will extract the line starts with "Output, ..."
"""
import os
import sys
import numpy as np
from sandbox import eval_asvspoof
def parse_txt(file_path):
bonafide = []
spoofed = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if line.startswith('Output,'):
#Output, LA_E_9999487, 0, 0.172325
temp = line.split(',')
flag = int(temp[2])
name = temp[1]
if flag:
bonafide.append(float(temp[-1]))
else:
spoofed.append(float(temp[-1]))
bonafide = np.array(bonafide)
spoofed = np.array(spoofed)
return bonafide, spoofed
if __name__ == "__main__":
data_path = sys.argv[1]
bonafide, spoofed = parse_txt(data_path)
mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)
print("Score file: {:s}".format(data_path))
print("mintDCF: {:1.4f}".format(mintDCF))
print("EER: {:2.3f}%".format(eer * 100))
print("Threshold: {:f}".format(threshold))
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/01_main_rawnet.py =====
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python 01_main_rawnet.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
    # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
# ===== File: project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/05/model.py =====
#!/usr/bin/env python
"""
model.py
Self-defined model definition.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
    """ Parse the ASVspoof protocol file and return a dictionary that
    maps each trial name to 1 (bonafide) or 0 (spoofed)
    """
    data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
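# Protocol format sketch (illustrative): each row of the ASVspoof2019 LA
# protocol ends with 'bonafide' or a spoofing label, e.g.
#   LA_0069 LA_D_1047731 - - bonafide
# so protocol_parse maps the trial name (second field) to 1 or 0.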
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
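    # Editor's sketch: the repeat-then-truncate padding used above, isolated
    # on a toy (fft_bin=3, frame=2) matrix; frames repeat as 0,1,0,1,0.
    @staticmethod
    def _repeat_pad_demo(trunc_len=5):
        feat = torch.arange(6, dtype=torch.float32).reshape(3, 2)
        rep = int(np.ceil(trunc_len / feat.shape[1]))
        return feat.repeat(1, rep)[:, 0:trunc_len]  # shape (3, trunc_len)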
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above; no separate score file is written
return None
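# Editor's sketch of the training-mode output contract above: targets are
# tiled once per sub-model so that they align with the stacked score rows.
def _tile_targets_demo(targets, n_submodels):
    # e.g. [0, 1] with 2 sub-models -> tensor([0, 1, 0, 1])
    return torch.tensor(targets).repeat(n_submodels)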
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: imported by the project main script for training and inference.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio can be used to change the sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
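    # Editor's sketch: the random-trim branch above, isolated; a start frame
    # is drawn uniformly from [0, true_frame_num - trunc_len].
    @staticmethod
    def _random_trim_start(true_frame_num, trunc_len):
        pos = torch.rand([1]) * (true_frame_num - trunc_len)
        return torch.floor(pos[0]).long()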
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
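    # Editor's sketch, assuming (per the P2SGrad formulation) that the
    # m_angle layer returns cosine similarities between the L2-normalized
    # embedding and per-class weights; the real layer is in core_modules.p2sgrad.
    @staticmethod
    def _cosine_score_demo(emb, class_weights):
        emb_n = torch_nn_func.normalize(emb, dim=-1)
        w_n = torch_nn_func.normalize(class_weights, dim=-1)
        return emb_n @ w_n.t()  # (batch, n_classes), values in [-1, 1]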
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above; no separate score file is written
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: imported by the project main script for training and inference.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio can be used to change the sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above; no separate score file is written
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
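    # Editor's sketch, assuming P2SGradLoss is the mean-squared error between
    # cosine scores and a one-hot target (per the P2SGrad formulation); the
    # actual implementation lives in core_modules.p2sgrad.
    @staticmethod
    def _p2sgrad_mse_demo(cos_scores, target_index):
        one_hot = torch_nn_func.one_hot(target_index.long(),
                                        num_classes=cos_scores.shape[-1])
        return torch_nn_func.mse_loss(cos_scores, one_hot.to(cos_scores.dtype))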
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: imported by the project main script for training and inference.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio can be used to change the sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
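    # Editor's note on the buffer layout above: embeddings are stacked
    # sub-model-major, so rows [idx * batch : (idx + 1) * batch] of
    # output_emb belong to sub-model idx. A helper making that explicit:
    # usage: output_emb[Model._submodel_rows(idx, batch_size)]
    @staticmethod
    def _submodel_rows(idx, batch_size):
        return slice(idx * batch_size, (idx + 1) * batch_size)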
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above; no separate score file is written
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: imported by the project main script for training and inference.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio can be used to change the sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
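    # Editor's sketch: normalize_target and denormalize_output form an
    # affine round trip; for any mean m and non-zero std s,
    # denormalize(normalize(y)) recovers y exactly.
    @staticmethod
    def _roundtrip_demo():
        m, s, y = torch.tensor([0.5]), torch.tensor([2.0]), torch.tensor([3.0])
        return torch.allclose(((y - m) / s) * s + m, y)  # True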
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above; no separate score file is written
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-fixed-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage: imported by the project main script for training and inference.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
##
def protocol_parse(protocol_filepath):
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio can be used to change the sampling rate
self.m_target_sr = 16000
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually chosen number of frames kept per file (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
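    # Editor's note: with the 160-point hop above, v_truncate_lens is
    # 10 * 16 * 750 // 160 = 750 frames, i.e. 750 * 160 / 16000 = 7.5 s
    # of 16 kHz audio per file.
    @staticmethod
    def _truncate_len_demo(frame_hops=(160,)):
        return [10 * 16 * 750 // x for x in frame_hops]  # -> [750]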
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
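# Added illustration: a minimal sketch of the trim-or-repeat logic above on a
# dummy (feat_dim, frame_num) tensor; all names here are hypothetical.
import numpy as np
import torch

def fit_to_length(feat, trunc_len):
    # feat: (feat_dim, frame_num) -> (feat_dim, trunc_len)
    frame_num = feat.shape[1]
    if frame_num > trunc_len:
        # trim randomly, keeping a contiguous window
        pos = int(torch.randint(0, frame_num - trunc_len, [1]))
        return feat[:, pos:pos + trunc_len]
    # repeat the short utterance until it covers trunc_len frames
    rep = int(np.ceil(trunc_len / frame_num))
    return feat.repeat(1, rep)[:, 0:trunc_len]

# fit_to_length(torch.randn(60, 37), 100).shape -> torch.Size([60, 100])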
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * num_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
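# Added illustration: both buffers above follow the same stacking convention,
# sub-model idx owns rows [idx * batch : (idx + 1) * batch]. Minimal sketch
# with hypothetical sizes:
import torch
batch, submodels, dim = 4, 2, 8
buf = torch.zeros(batch * submodels, dim)
for idx in range(submodels):
    buf[idx * batch:(idx + 1) * batch] = float(idx)
# rows 0-3 belong to sub-model 0, rows 4-7 to sub-model 1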
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,347 | 34.691542 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
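# Added usage sketch (hypothetical file content): each protocol row is
# whitespace-separated and, as parsed above, column 1 holds the trial name
# and the last column the 'bonafide'/'spoof' label, e.g. rows such as
#   LA_0079 LA_T_1138215 - - bonafide
#   LA_0079 LA_T_1271820 - A01 spoof
# would be parsed as:
#
#   labels = protocol_parse('protocol.txt')
#   labels['LA_T_1138215']   # -> 1 (bonafide)
#   labels['LA_T_1271820']   # -> 0 (spoof)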
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration (sub-model)
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * num_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
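# Added illustration: the permute/view above turns the CNN output
# (batch, channel, frame, feat) into a frame-indexed sequence
# (batch, frame, channel * feat) that the pooling layer consumes.
# Minimal sketch with hypothetical sizes:
import torch
hidden = torch.randn(4, 32, 50, 3)               # (batch, chan, frame, feat)
seq = hidden.permute(0, 2, 1, 3).contiguous()    # (batch, frame, chan, feat)
seq = seq.view(4, 50, -1)                        # (batch, frame, chan * feat)
# seq.shape -> torch.Size([4, 50, 96])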
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration (sub-model)
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * num_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
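# Added sketch (hypothetical, not the project's nii_amsoftmax.AMAngleLayer):
# an additive-margin angle layer typically returns cosine scores without and
# with the margin; the scale s and margin m below are assumed values.
import torch
import torch.nn as nn
import torch.nn.functional as F

class AMAngleSketch(nn.Module):
    def __init__(self, in_dim, n_class, s=20.0, m=0.9):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(in_dim, n_class))
        self.s, self.m = s, m

    def forward(self, x):
        # cosine between L2-normalized embeddings and class weights
        cos = F.normalize(x, dim=1) @ F.normalize(self.weight, dim=0)
        return self.s * cos, self.s * (cos - self.m)

# AMAngleSketch(64, 2)(torch.randn(8, 64)) -> (plain scores, margin scores)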
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration (sub-model)
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
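# Added sketch (hypothetical, not the project's nii_front_end.LFCC): linear-
# frequency cepstral coefficients are roughly a DCT of log energies from a
# linear-frequency triangular filterbank over an STFT power spectrum.
import math
import torch

def lfcc_sketch(wav, n_fft=512, hop=160, win=320, n_filt=20):
    window = torch.hann_window(win)
    power = torch.stft(wav, n_fft, hop_length=hop, win_length=win,
                       window=window, return_complex=True).abs() ** 2
    n_bins = n_fft // 2 + 1
    edges = torch.linspace(0, n_bins - 1, n_filt + 2)
    bins = torch.arange(n_bins, dtype=torch.float32)
    # triangular filters evenly spaced on the linear frequency axis
    fbank = torch.stack(
        [torch.clamp(
            torch.min((bins - edges[m - 1]) / (edges[m] - edges[m - 1]),
                      (edges[m + 1] - bins) / (edges[m + 1] - edges[m])),
            min=0.0)
         for m in range(1, n_filt + 1)])
    log_e = torch.log(fbank @ power + 1e-5)          # (n_filt, frames)
    # DCT-II to decorrelate the log filterbank energies
    k = torch.arange(n_filt, dtype=torch.float32)
    dct = torch.cos(math.pi / n_filt * k.unsqueeze(1) * (k + 0.5).unsqueeze(0))
    return (dct @ log_e).transpose(0, 1)             # (frames, n_filt)

# lfcc_sketch(torch.randn(16000)).shape -> (frames, 20)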
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * num_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
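# Added sketch (hypothetical, not the project's nii_nn.SelfWeightedPooling):
# an attention-weighted statistics pooling that concatenates weighted mean
# and standard deviation would explain the `* 2` in the Linear input above.
import torch
import torch.nn as nn

class AttentiveStatsPoolSketch(nn.Module):
    def __init__(self, feat_dim):
        super().__init__()
        self.att = nn.Linear(feat_dim, 1)

    def forward(self, x):                       # x: (batch, frames, feat_dim)
        w = torch.softmax(self.att(x), dim=1)   # attention weights over frames
        mean = (w * x).sum(dim=1)
        var = ((x - mean.unsqueeze(1)) ** 2 * w).sum(dim=1)
        return torch.cat([mean, var.clamp(min=1e-8).sqrt()], dim=1)

# AttentiveStatsPoolSketch(96)(torch.randn(4, 50, 96)).shape -> [4, 192]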
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: index of the front-end configuration (sub-model)
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * num_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
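    # shape sketch (an annotation, not asserted by the code): wav is
    # (batch, num_samples, 1) and x_sp_amp is roughly
    # (batch, num_samples // frame_hop, lfcc_dim * 3) with deltas enabled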
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
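    # layout note: sub-model embeddings are stacked along the batch axis,
    # i.e. rows [idx * batch_size : (idx+1) * batch_size] belong to the
    # idx-th front-end configuration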
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
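    # at inference the softmax probability of column 1 is returned, which
    # corresponds to the bona fide class under the 1 = bonafide / 0 = spoof
    # convention used by the protocol parser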
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout above; no separate score file is
            # written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-attention-am/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
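# example (hypothetical rows in the ASVspoof2019 CM protocol format, where
# column 2 is the trial name and the last column is the key):
# "LA_0079 LA_T_1138215 - - bonafide" -> data_buffer['LA_T_1138215'] = 1
# "LA_0079 LA_T_1271820 - A07 spoof" -> data_buffer['LA_T_1271820'] = 0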
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfcc_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the front-end / sub-model configuration
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout above; no separate score file is
            # written here
return None
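    # note: each evaluation trial emits one stdout line of the form
    # "Output, <name>, <label>, <score>"; collecting these lines is
    # presumably how scores are gathered for EER computation downstream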
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,207 | 33.642369 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually choose the number of frames kept per trial
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
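        # with frame_hops = [160] this gives 10 * 16 * 750 // 160 = 750
        # frames, i.e. 120000 samples or 7.5 s at 16 kHz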
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
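        # one-class setup: OCAngleLayer produces a single angular score per
        # trial (bona fide is scored against one target direction), hence a
        # single output class rather than two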
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
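    # padding/trimming sketch: trials longer than trunc_len frames get a
    # random crop, shorter trials are tiled until the buffer is full, so
    # every batch item ends up with a fixed (trunc_len, lfb_dim) feature map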
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
            # scores are printed to stdout above; no separate score file is
            # written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually choose the number of frames kept per trial
        # (750 frames at a 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
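        # reading aid: Dropout(0.7) acts on the flattened CNN output, and
        # MaxFeatureMap2D halves the 512-dim projection to 256, matching the
        # final Linear(256, v_emd_dim)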
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
            # scores are printed to stdout above; no separate score file is
            # written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually choose the number of frames kept per trial
        # (750 frames at a 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, handle the padding and trim independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
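    # during training both the raw cos(theta) and the margin-adjusted phi
    # are returned (phi presumably carries the one-class margin) so that
    # OCSoftmaxWithLoss can form the objective; at inference the cosine
    # score alone is used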
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually choose the number of frames kept per trial
        # (750 frames at a 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for each trial in the mini-batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# mini-batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
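# Usage sketch (illustrative; the file name and trial name below are
# hypothetical, not taken from this project):
# >>> parser = protocol_parse('protocol.txt')
# >>> parser['LA_D_1047731'] # a bonafide trial
# 1
# Each protocol row is assumed to carry the trial name in column 1 and
# the 'bonafide'/'spoof' label in its last column, as read above.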
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio may be used to change the sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# manually truncate each trial to a fixed number of frames (7.5 s of audio; 750 frames for a 160-point frame hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
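# Note on channel sizes (a reading aid, not part of the original code):
# nii_nn.MaxFeatureMap2D takes an element-wise max over the two halves of
# the channel dimension, halving every Conv2d output before the next
# layer, e.g. Conv2d(1, 64) -> MFM -> 32 channels, Conv2d(32, 96) -> MFM
# -> 48, Conv2d(48, 128) -> MFM -> 64. This is why each BatchNorm2d is
# declared with half of the preceding Conv2d's out_channels, and why
# Linear(..., 512) above is followed by MFM into a 256-dim Linear input.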
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
# default: no normalization (zero mean, unit standard deviation)
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for each trial in the mini-batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
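# Shape sketch (hedged example using this file's defaults, frame hop 160
# and trunc_len 750): a 4-second 16 kHz trial yields roughly 400 frames,
# which are tiled ceil(750 / 400) = 2 times and clipped to 750 frames; a
# 12-second trial (~1200 frames) is instead cropped at a random start
# position. Either way the output is (batch, 750, lfb_dim).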
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# mini-batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
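# Reading aid (an interpretation; core_modules/oc_softmax.py holds the
# authoritative definition): OCAngleLayer appears to return two views of
# the embedding-to-weight angle: x_cos_val, the plain cosine score used
# at inference time, and x_phi_val, the margin-adjusted score consumed
# by OCSoftmaxWithLoss during training.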
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-oc/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio may be used to change the sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# manually truncate each trial to a fixed number of frames (7.5 s of audio; 750 frames for a 160-point frame hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 256
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
# default: no normalization (zero mean, unit standard deviation)
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for each trial in the mini-batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# mini-batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_score = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_score
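# Layout note (a reading aid, not part of the original code): embeddings
# are stacked sub-model by sub-model, i.e. output_score[0:batch] holds
# sub-model 0, output_score[batch:2*batch] sub-model 1, and so on.
# _compute_score and the repeated target vector in forward() both rely
# on this ordering.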
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,313 | 34.367206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# mini-batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
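# Shape walk-through (illustrative, assuming batch=2, a 748-frame input
# and the 60-dim LFCC+delta front-end): (2, 1, 748, 60) passes the LCNN
# stack to (2, 32, 46, 3) after four 2x2 max-poolings; permute/view
# gives (2, 46, 96); the two BLSTM layers keep (2, 46, 96); the residual
# sum over frames yields (2, 96); and the final Linear maps it to the
# (2, 64) embedding.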
def _compute_score(self, x, inference=False):
"""
"""
# per-sub-model mini-batch size (x stacks embeddings from all sub-models)
batch_size = x.shape[0] // self.v_submodels
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# out_score[:, 1] corresponds to the positive (bonafide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
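# Contrast with the fixed-length models above (a reading aid): here
# v_truncate_lens is [None], so _front_end returns the full-length LFCC
# sequence (batch, frame_num, 60) and the trunc_len/datalength arguments
# are effectively unused; variable length is handled later by summing
# over frames in _compute_embedding.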
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# mini-batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# per-sub-model mini-batch size (x stacks embeddings from all sub-models)
batch_size = x.shape[0] // self.v_submodels
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# out_score[:, 1] corresponds to the positive (bonafide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
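    # Shape sketch (assuming the 20-dim LFCC front-end with delta features
    # used in this project, i.e. 60 input dims): the front-end gives
    # (batch, frame_num, 60); the LCNN stack with four 2x2 max-pooling layers
    # and MaxFeatureMap2D channel halving yields
    # (batch, 32, frame_num//16, 60//16), which the permute/view above turns
    # into (batch, frame_num//16, (60//16)*32) to match the BLSTM width and
    # the Linear input size (lfcc_dim//16)*32.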
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
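    # Note: P2SActivationLayer computes cosine similarities between the
    # embedding and per-class weight vectors, so each score lies in [-1, 1];
    # column 1 is used as the bona fide (positive) class at inference.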
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed above instead of being written to a file
return None
class Loss():
""" Wrapper to define loss function
"""
    def __init__(self, args):
        """ use the P2SGrad-based MSE loss defined in core_modules.p2sgrad
        """
        self.m_loss = nii_p2sgrad.P2SGradLoss()
    def compute(self, outputs, target):
        """ compute loss given outputs = [scores, target_vec, ...] returned
        by Model.forward; the separate target argument is unused
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
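# Usage sketch: given a protocol file with rows such as (hypothetical
# examples of the ASVspoof2019 layout implied by row[1]/row[-1] above)
#   LA_0069 LA_D_1047731 - - bonafide
#   LA_0079 LA_T_1138215 - A01 spoof
# protocol_parse would return {'LA_D_1047731': 1, 'LA_T_1138215': 0}.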
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
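    # Note: every Conv2d above is followed by MaxFeatureMap2D, which splits
    # the channel dimension in half and takes an element-wise max; this is
    # why Conv2d(1, 64, ...) feeds a layer expecting 32 channels,
    # Conv2d(32, 96, ...) feeds 48, and so on through the LCNN stack.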
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed above instead of being written to a file
return None
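    # Note: in training mode forward returns [scores, target_vec, True];
    # the trailing flag presumably tells the training wrapper that targets
    # are packed together with the outputs, which is why Loss.compute below
    # ignores its own target argument.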
class Loss():
""" Wrapper to define loss function
"""
    def __init__(self, args):
        """ use the P2SGrad-based MSE loss defined in core_modules.p2sgrad
        """
        self.m_loss = nii_p2sgrad.P2SGradLoss()
    def compute(self, outputs, target):
        """ compute loss given outputs = [scores, target_vec, ...] returned
        by Model.forward; the separate target argument is unused
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
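    # Note: with the configuration above (20 base LFCC coefficients expanded
    # by delta and delta-delta features to 60 dims), x_sp_amp is expected to
    # be (batch, frame_num, 60); the exact layout is defined by
    # sandbox.util_frontend.LFCC.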
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
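    # Note: the two BLSTM layers in m_be_pool form a residual block (their
    # output is added back to hidden_features), and frames are then pooled
    # by a plain sum over time before the linear embedding layer; this is
    # the "lstmsum" pooling that names this model variant.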
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed above instead of being written to a file
return None
class Loss():
""" Wrapper to define loss function
"""
    def __init__(self, args):
        """ use the P2SGrad-based MSE loss defined in core_modules.p2sgrad
        """
        self.m_loss = nii_p2sgrad.P2SGradLoss()
    def compute(self, outputs, target):
        """ compute loss given outputs = [scores, target_vec, ...] returned
        by Model.forward; the separate target argument is unused
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
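    # Note: when data_mean_std is None, the means default to 0 and the stds
    # to 1, so normalize_input / normalize_target below reduce to identity
    # transforms.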
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
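    # Example (hypothetical): if the protocol maps 'LA_D_1047731' to bona
    # fide, _get_target(['LA_D_1047731']) returns [1].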
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed above instead of being written to a file
return None
class Loss():
""" Wrapper to define loss function
"""
    def __init__(self, args):
        """ use the P2SGrad-based MSE loss defined in core_modules.p2sgrad
        """
        self.m_loss = nii_p2sgrad.P2SGradLoss()
    def compute(self, outputs, target):
        """ compute loss given outputs = [scores, target_vec, ...] returned
        by Model.forward; the separate target argument is unused
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss
if __name__ == "__main__":
print("Definition of model")
| 14,979 | 33.675926 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
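# Note: TrainableLinearFb multiplies the input amplitude spectrogram by a
# trainable matrix initialized as a linear-frequency filter bank, squares
# the result, and returns log10(. + eps). As the first layer of each
# m_transform stack below, it turns the spectrogram into a learnable
# log filter-bank feature with spec_fb_dim dimensions.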
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
            tmp_emb = m_output(hidden_features)
            output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
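    # Note: SelfWeightedPooling performs attention-weighted pooling over
    # frames and presumably returns the weighted mean and weighted standard
    # deviation concatenated, which would explain the factor of 2 in the
    # Linear input size (spec_fb_dim//16) * 32 * 2 above.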
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed above instead of being written to a file
return None
class Loss():
""" Wrapper to define loss function
"""
    def __init__(self, args):
        """ use the P2SGrad-based MSE loss defined in core_modules.p2sgrad
        """
        self.m_loss = nii_p2sgrad.P2SGradLoss()
    def compute(self, outputs, target):
        """ compute loss given outputs = [scores, target_vec, ...] returned
        by Model.forward; the separate target argument is unused
        """
        loss = self.m_loss(outputs[0], outputs[1])
        return loss
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
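    # Note: compared with the lfcc-lcnn-lstmsum-p2s models, this variant
    # changes only the front-end (amplitude spectrogram + TrainableLinearFb
    # instead of fixed LFCC) and the pooling (SelfWeightedPooling instead of
    # BLSTM + sum); the LCNN body, P2SGrad layer, and loss are the same.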
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
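# Added illustration (hypothetical data, not part of the original file):
# assuming the ASVspoof2019 LA protocol layout, e.g. a line such as
#   LA_0079 LA_T_1138215 - - bonafide
# the parser keys on column 1 (file name) and the last column (key):
#
#   >>> parser = protocol_parse('protocol.txt')
#   >>> parser['LA_T_1138215']
#   1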
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
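# Added note: given an amplitude spectrogram of shape
# (batch, frame_num, fft_n // 2 + 1), this layer is expected to return
# log-power filter-bank features of shape (batch, frame_num, filter_num).
# The projection is merely initialized as a linear filter bank; unlike
# the fixed front-end used later, it keeps being updated during training.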
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
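# (added note) at the 16 kHz working rate above, hop 160 / frame 320 /
# FFT 512 correspond to a 10 ms frame shift and a 20 ms frame length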
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part used during training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
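# Added shape sketch (assuming the default spec_fb_dim = 60): the four
# MaxPool2d([2, 2]) layers shrink the time and feature axes by 16x, and
# the final MaxFeatureMap2D halves 64 channels to 32, so the block maps
# (batch, 1, T, 60) to roughly (batch, 32, T // 16, 60 // 16); flattened
# per frame that is (60 // 16) * 32 = 96 features, which matches the
# input size of the pooling layer below.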
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
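# Added note: the spectrogram front-end above runs under torch.no_grad()
# and therefore stays fixed; the trainable part of the feature extractor
# is the TrainableLinearFb placed at the head of m_transform.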
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
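# Added note: P2SActivationLayer is assumed here to output one cosine
# similarity in [-1, 1] per class; at inference only column 1 (bonafide)
# is kept as the detection score, as the comment above states.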
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
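# Added sketch (not part of the original file): P2SGradLoss is assumed to
# follow the P2SGrad recipe (Zhang et al., 2019), i.e. a mean-squared
# error between the cosine scores from P2SActivationLayer and one-hot
# targets. The helper below is a hypothetical stand-in for illustration.
def _p2sgrad_loss_sketch(cos_scores, target_index):
    """MSE between cosine scores (N, C) and integer targets (N,)."""
    one_hot = torch_nn_func.one_hot(
        target_index.long(), num_classes=cos_scores.shape[1])
    return torch_nn_func.mse_loss(cos_scores, one_hot.to(cos_scores.dtype))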
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part used during training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part used during training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part used during training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,147 | 33.349206 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part used during training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
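# (added note) with delta and delta-delta appended, the 20 base LFCCs
# become a 60-dimensional frame vector, so lfcc_dim // 16 below equals 3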
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
            # compute the embedding
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
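    # Where the (lfcc_dim//16) * 32 sizes above come from (a sketch, not
    # part of the original code): the LCNN stack contains four 2x2
    # max-pooling layers, dividing both the frame axis and the feature
    # axis by 2^4 = 16, while the final MaxFeatureMap2D halves the 64
    # output channels to 32. With lfcc_dim = 60 after deltas, each pooled
    # frame thus carries (60 // 16) * 32 = 3 * 32 = 96 values, matching
    # the BLSTM and linear layer sizes built in __init__.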
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
             input_data will be the list [scores, target_vec, True] from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
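# Minimal usage sketch for protocol_parse (illustrative only; the file name
# and trial ID below are made-up placeholders, not taken from this project):
#
# >>> parser = protocol_parse('protocol.txt')
# >>> parser['LA_T_0000001']   # -> 1 if that row ends with 'bonafide'
#
# Each whitespace-separated row uses column 1 as the trial name (key) and
# the last column ('bonafide' or 'spoof') as the label.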
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
            # compute the embedding
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
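    # Interpretation sketch (hedged; the nii_ocsoftmax module is outside
    # this file): following the OC-softmax formulation of Zhang et al.
    # (2021), the angle layer is assumed to compute cos(theta) between
    # the embedding and one learned bona fide direction, returning
    # margin-shifted values along the lines of
    #   neg ~ alpha * (cos(theta) - m_neg)   # used for spoof trials
    #   pos ~ alpha * (m_pos - cos(theta))   # used for bona fide trials
    # with typical margins m_pos ~ 0.9, m_neg ~ 0.2 and scale alpha ~ 20.
    # At inference only out_score_neg is kept, and its mean is printed by
    # forward() as the per-trial detection score.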
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
             input_data will be the list [scores, target_vec, True] from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
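    # Round-trip sketch for the helpers above (illustrative; with no
    # mean/std data, prepare_mean_std falls back to mean 0 and std 1, so
    # both maps are identities here):
    #
    # y_norm = self.normalize_target(y)
    # y_back = self.denormalize_output(y_norm)
    # torch.allclose(y, y_back)   # True whenever std > 0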
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
            # compute the embedding
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
             input_data will be the list [scores, target_vec, True] from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
            # compute the embedding
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
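    # Concrete reshape walkthrough for steps 1-5 above (a sketch assuming
    # ~400 LFCC frames and lfcc_dim = 60; the numbers are illustrative):
    # m_trans maps (batch, 1, 400, 60) to (batch, 32, 25, 3) after four
    # 2x2 poolings and the channel-halving MaxFeatureMap2D; the
    # permute/view yields (batch, 25, 96); the BLSTMs preserve that
    # shape; and the frame-sum plus m_output gives (batch, 64), i.e. one
    # v_emd_dim embedding per trial and sub-model.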
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
             input_data will be the list [scores, target_vec, True] from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
            # compute the embedding
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
             input_data will be the list [scores, target_vec, True] from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfcc-lcnn-lstmsum-oc/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the sub-model / front-end configuration in use
        trunc_len: number of frames to be kept after truncation
        datalength: list of data lengths in mini-batch
        output:
        -------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,459 | 33.89842 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
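# For illustration, a hypothetical ASVspoof2019-style protocol row
#   LA_0079 LA_T_1138215 - - bonafide
# yields data_buffer['LA_T_1138215'] = 1; rows ending in 'spoof' yield 0.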
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
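            # Channel bookkeeping: MaxFeatureMap2D takes an element-wise
            # max over the two halves of the channel axis, halving it.
            # Hence Conv2d(1, 64) -> MFM -> 32 channels feeds Conv2d(32, 64),
            # and the four [2, 2] max-pools shrink the time and frequency
            # axes by a factor of 16 (hence lfb_dim // 16 below).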
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the sub-model / front-end configuration in use
        trunc_len: number of frames to be kept after truncation
        datalength: list of data lengths in mini-batch
        output:
        -------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
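            # class index 1 corresponds to bonafide (see protocol_parse),
            # so the bonafide posterior is returned as the score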
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
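# Note: trials absent from the protocol will later raise the KeyError
# that Model._get_target() catches before terminating with a message.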
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
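            # with the default configuration above, this corresponds to
            # 20 ms frames (320 points at 16 kHz), a 10 ms frame shift
            # (160 points), a 512-point FFT, and 60 linear filter banks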
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the sub-model / front-end configuration in use
        trunc_len: number of frames to be kept after truncation
        datalength: list of data lengths in mini-batch
        output:
        -------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
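            # shape sketch for the default config (lfb_dim = 60), for
            # illustration: m_trans outputs (batch, 32, frame_num//16,
            # 60//16); after permute + view, hidden_features becomes
            # (batch, frame_num//16, 32 * 3), i.e. (batch, T, 96), which
            # matches the BLSTM input size (lfb_dim // 16) * 32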
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
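            # s and m are the scale and additive margin of AM-softmax;
            # a larger m enforces a wider inter-class margin on the
            # cosine-similarity scores (standard AM-softmax usage)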
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the sub-model / front-end configuration in use
        trunc_len: number of frames to be kept after truncation
        datalength: list of data lengths in mini-batch
        output:
        -------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
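            #    (residual connection around the BLSTM stack, sum-pooling
            #    over frames -- the "lstmsum" in this model's name -- then
            #    a linear projection to the embedding space)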
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
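            # with_emphasis is assumed to apply waveform pre-emphasis and
            # with_delta to append delta features (disabled here); these
            # readings are based on the argument names of nii_front_end.LFB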
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: index of the sub-model / front-end configuration in use
        trunc_len: number of frames to be kept after truncation
        datalength: list of data lengths in mini-batch
        output:
        -------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
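# Note on the channel sizes above: nii_nn.MaxFeatureMap2D is assumed to
# halve the channel dimension by taking an element-wise max over the two
# channel halves (the Max-Feature-Map activation used in LCNN), which is
# why Conv2d(1, 64, ...) is followed by layers expecting 32 input channels.
# A minimal, hypothetical sketch of that operation (the real code lives in
# sandbox/block_nn.py):
def _mfm2d_sketch(x):
    # x: (batch, channel, height, width), channel assumed to be even
    half_a, half_b = torch.split(x, x.shape[1] // 2, dim=1)
    return torch.maximum(half_a, half_b)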
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub-models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
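# Shape walkthrough for the steps above (a hedged example assuming
# lfb_dim=60 and a hypothetical 400-frame trial): the front-end yields
# (batch, 400, 60); the four MaxPool2d layers shrink it to
# (batch, 32, 400//16, 60//16); permute + view then give
# (batch, 25, (60//16) * 32) = (batch, 25, 96), which matches the BLSTM
# width (lfb_dim//16) * 32 used by m_before_pooling.
def _shape_check_sketch():
    conv_out = torch.randn(2, 32, 25, 3)  # stand-in for m_trans output
    flat = conv_out.permute(0, 2, 1, 3).contiguous().view(2, 25, -1)
    assert flat.shape == (2, 25, 96)      # (lfb_dim // 16) * 32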
def _compute_score(self, x, inference=False):
"""
"""
# number of sub-models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
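# In inference mode, only column 1 of the softmax over the first score set
# is kept, i.e. the posterior of the bona fide class (label 1 in
# protocol_parse). A hedged illustration with made-up logits:
#   logits = torch.tensor([[2.0, -1.0]])
#   torch_nn_func.softmax(logits, dim=1)[:, 1]  # ~= 0.047, spoof-like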
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
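# Note: target_vec.repeat(self.v_submodels) above tiles the labels so they
# align with the (batch * v_submodels) embeddings stacked by
# _compute_embedding; e.g. with two sub-models, targets [1, 0] would
# become [1, 0, 1, 0].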
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
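# A minimal sketch of the additive-margin scoring assumed inside
# nii_amsoftmax.AMAngleLayer (s=10, m=0.35 as passed in Model.__init__;
# the real implementation is in core_modules/am_softmax.py):
def _am_scores_sketch(emb, weight, s=10.0, m=0.35):
    # emb: (batch, emd_dim), weight: (emd_dim, num_class)
    cos = torch_nn_func.normalize(emb, dim=1) @ \
          torch_nn_func.normalize(weight, dim=0)
    # plain scores for inference, margin-penalized scores for training
    return s * cos, s * (cos - m)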
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-am/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.am_softmax as nii_amsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
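# Hedged usage sketch: given the indexing above, each protocol row is
# assumed to look like "LA_0069 LA_D_1047731 - - bonafide", so row[1] is
# the trial name and row[-1] the label:
#   parser = protocol_parse('protocol.txt')  # hypothetical file
#   parser['LA_D_1047731']                   # -> 1 (bonafide) or 0 (spoof)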
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end is used
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_amsoftmax.AMAngleLayer(self.v_emd_dim, self.v_out_class, s=10, m=0.35)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub-models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub-models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return torch_nn_func.softmax(out_score_neg, dim=1)[:, 1]
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_amsoftmax.AMSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,629 | 34.123596 | 90 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
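# The layer above multiplies a magnitude spectrogram by a trainable filter
# bank (initialized from nii_front_end.linear_fb) and returns log power.
# A self-contained sketch of the same math with made-up shapes:
def _linear_fb_sketch(spec, fb):
    # spec: (batch, frame, fft_bins), fb: (fft_bins, filter_num)
    return torch.log10((spec @ fb) ** 2 + torch.finfo(torch.float32).eps)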
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end is used
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
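# Note on the factor 2 in m_output_act above: nii_nn.SelfWeightedPooling
# is assumed to be attentive pooling that concatenates the weighted mean
# and weighted standard deviation over frames, doubling the feature width.
# A hypothetical sketch (attn stands in for the learned attention layer):
def _self_weighted_pool_sketch(feat, attn):
    # feat: (batch, frame, dim); attn(feat): (batch, frame, 1)
    w = torch_nn_func.softmax(attn(feat), dim=1)
    mean = (w * feat).sum(dim=1)
    var = ((feat - mean.unsqueeze(1)) ** 2 * w).sum(dim=1)
    return torch.cat([mean, torch.sqrt(var + 1e-8)], dim=1)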
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size (each sub-model processes the full mini-batch here)
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
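# Note the asymmetry above: training returns sigmoid probabilities so the
# BCELoss wrapper below can consume them, while inference returns the raw
# pre-sigmoid activation as the score. A one-line illustration:
#   torch.sigmoid(torch.tensor([0.8]))  # ~= 0.690, training path only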
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end is used
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size (each sub-model processes the full mini-batch here)
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end is used
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size (each sub-model processes the full mini-batch here)
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end is used
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
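# Shape sketch (assuming the input waveform is (batch, length, 1), as the
# squeeze(-1) above implies): wav.squeeze(-1) -> (batch, length), and the
# Spectrogram front-end -> (batch, frame_num, fft_n // 2 + 1).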
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N (2^4 = 16 here) comes from the MaxPool2d layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
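# A minimal sketch of the two modes: during training the raw activation is
# squashed, e.g. torch.sigmoid(torch.tensor([0.])) -> 0.5, so it can act as
# a probability; at inference the pre-sigmoid activation itself is the score.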
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
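# Hedged toy example of the loss above (values are illustrative only):
# torch_nn.BCELoss()(torch.tensor([0.9, 0.2]), torch.tensor([1., 0.]))
# evaluates to roughly 0.16; BCELoss expects probabilities in [0, 1],
# which matches the sigmoid applied in Model._compute_score for training.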
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/03/model.py |
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
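# Usage sketch (the protocol path and trial ID below are hypothetical):
# parser = protocol_parse('protocol.txt')
# parser['LA_T_0000001'] # -> 1 for a bonafide trial, 0 for a spoofed one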
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
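# Shape sketch (assuming nii_front_end.linear_fb returns an
# (fft_bin, filter_num) filter-bank matrix): the layer maps the last
# fft_bin axis of the amplitude spectrogram to filter_num log10-power
# filter-bank outputs; the float32 eps keeps the log argument positive.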
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# the model can handle multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N (2^4 = 16 here) comes from the MaxPool2d layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-sig/02/model.py |
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# the model can handle multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N (2^4 = 16 here) comes from the MaxPool2d layers
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
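# Control-flow summary (paraphrasing the code above, not original docs):
# in training, forward() returns [scores, target_vec, True] so that
# Loss.compute can read the targets from the network output; in evaluation
# it prints one "Output, <name>, <target>, <score>" line per trial and
# returns None.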
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,697 | 33.421546 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/05/model.py |
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio may be used to change the sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually set the number of frames kept per trial (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
# default to identity normalization: zero mean, unit standard deviation
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for each trial in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly the number of frames in this trial
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
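# Worked sketch of the trim/pad logic above (numbers are illustrative):
# with trunc_len = 750 and a trial of true_frame_num = 300 frames,
# rep = ceil(750 / 300) = 3, so the 300 frames are tiled to 900 and cut
# back to 750; a longer trial instead keeps a 750-frame segment starting
# at a random position.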
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size per sub-model
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
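# Score convention (a summary under the P2SGrad formulation assumed by
# nii_p2sgrad): each row of out_score holds cosine-similarity activations
# for the two classes, and column 1 (bonafide) serves as the detection
# score at inference time.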
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
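# Hedged note: P2SGradLoss, as formulated in the P2SGrad paper, is assumed
# to compute a mean-squared error between the cosine scores in outputs[0]
# and a one-hot encoding of the target indices in outputs[1].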
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/04/model.py |
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio may be used to change the sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually set the number of frames kept per trial (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
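# Architecture summary (restating the settings above): each sub-model maps
# a trial to a v_emd_dim = 64 dimensional embedding, and P2SActivationLayer
# converts it into v_out_class = 2 activations (spoof vs bonafide).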
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
# default to identity normalization: zero mean, unit standard deviation
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for each trial in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly the number of frames in this trial
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch size per sub-model
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
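# Dimension sketch (following the configuration above): the CNN output is
# (batch, 32, trunc_len // 16, spec_fb_dim // 16); torch.flatten(..., 1)
# therefore yields (trunc_len // 16) * (spec_fb_dim // 16) * 32 features,
# matching the first Linear layer of m_output_act.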
def _compute_score(self, x, inference=False):
"""
"""
# batch size
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# scores are printed above rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/01/model.py |
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate; torchaudio may be used to change the sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor for log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# manually set the number of frames kept per trial (750 for the 160-point hop)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of valid frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
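    # Worked example of the trim/pad logic above (an illustrative sketch,
    # assuming the default 160-point frame hop and trunc_len = 750):
    #   a 10 s, 16 kHz file has ~1000 frames -> a random 750-frame crop;
    #   a 5 s file has ~500 frames -> tiled twice, then cut back to 750.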
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (inputs are stacked along the batch dimension)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of embedding vectors (equals the batch size when a single sub-model is used)
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model's output layer
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
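# Example (an illustrative sketch; the lines below follow the ASVspoof2019
# LA protocol layout assumed by this parser, with the trial name in column 1
# and the bonafide/spoof key in the last column):
#   "LA_0079 LA_T_1138215 - - bonafide" -> data_buffer['LA_T_1138215'] = 1
#   "LA_0079 LA_T_1271820 - A01 spoof"  -> data_buffer['LA_T_1271820'] = 0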
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
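# Shape note (an illustrative sketch, assuming nii_front_end.linear_fb
# returns a (fn//2 + 1, filter_num) initialization matrix): the layer maps
# (batch, frame, fn//2 + 1) spectra to (batch, frame, filter_num) log-power
# filter-bank trajectories, e.g. 257 FFT bins -> 60 trajectories below.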
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually chosen truncation length (number of frames kept per file)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
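        # (10 * 16 * 750 = 120000 samples, i.e. 7.5 s at 16 kHz; with the
        #  160-point hop above this keeps 750 frames per file)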
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of valid frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (inputs are stacked along the batch dimension)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of embedding vectors (equals the batch size when a single sub-model is used)
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model's output layer
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually chosen truncation length (number of frames kept per file)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of valid frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (inputs are stacked along the batch dimension)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
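    # Layout note (illustrative): with a single sub-model the buffer is
    # simply (batch, v_emd_dim); with k sub-models the rows are grouped as
    # [sub-model 0 batch, sub-model 1 batch, ..., sub-model k-1 batch].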
def _compute_score(self, x, inference=False):
"""
"""
        # number of embedding vectors (equals the batch size when a single sub-model is used)
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model's output layer
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually chosen truncation length (number of frames kept per file)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of valid frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (inputs are stacked along the batch dimension)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of embedding vectors (equals the batch size when a single sub-model is used)
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model's output layer
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
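    # Score layout note (illustrative): during training the full
    # (batch * submodels, 2) cosine-score matrix is passed to P2SGradLoss;
    # at inference only column 1 (the bona fide class) is kept as the
    # scalar detection score.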
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,800 | 34.587838 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually chosen truncation length (number of frames kept per file)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of valid frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (inputs are stacked along the batch dimension)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
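    # Note (illustrative): training returns sigmoid probabilities so that
    # BCELoss in the Loss wrapper below applies directly; inference returns
    # the raw logit, which preserves the score ordering used for EER.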
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a score file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# working sampling rate, torchaudio is used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
        # manually chosen truncation length (number of frames kept per file)
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (the input batch is shared across sub-models)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a file
return None
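# ----------------------------------------------------------------------
# Illustrative sketch (added; not part of the original model): the
# trim / repeat-pad rule used inside Model._front_end above, extracted
# as a standalone function. The name toy_trim_or_pad is ours.
# ----------------------------------------------------------------------
def toy_trim_or_pad(x_sp_amp, true_frame_num, trunc_len):
    """ x_sp_amp: (fft_bin, n_frames) tensor for one utterance
    returns a (fft_bin, trunc_len) tensor: randomly trimmed when the
    utterance is long enough, otherwise repeated and cut to length
    """
    if true_frame_num > trunc_len:
        # pick a random start frame, as in _front_end
        pos = torch.rand([1]) * (true_frame_num - trunc_len)
        pos = torch.floor(pos[0]).long()
        return x_sp_amp[:, pos:trunc_len + pos]
    else:
        # repeat the valid frames until long enough, then cut
        rep = int(np.ceil(trunc_len / true_frame_num))
        return x_sp_amp[:, 0:true_frame_num].repeat(1, rep)[:, 0:trunc_len]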
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
              input_data will be the [scores, target_vec, flag] list from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
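# ----------------------------------------------------------------------
# Minimal usage sketch of Loss (added for illustration; not in the
# original file). Shapes follow Model.forward(): sigmoid scores and
# 0/1 targets, both of shape (batchsize * v_submodels,).
# ----------------------------------------------------------------------
def _demo_loss():
    loss_wrapper = Loss(None)                    # args is unused here
    scores = torch.sigmoid(torch.randn(4))       # fake scores in (0, 1)
    targets = torch.tensor([1., 0., 1., 0.])     # fake bonafide/spoof labels
    return loss_wrapper.compute([scores, targets, True], None)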
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
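# ----------------------------------------------------------------------
# Illustrative usage sketch (added; not in the original file). Protocol
# rows follow the ASVspoof2019 CM protocol, e.g.
#   LA_0069 LA_D_1047731 - - bonafide
# np.loadtxt also accepts file-like objects, so the parser can be
# smoke-tested without touching the disk:
# ----------------------------------------------------------------------
def _demo_protocol_parse():
    import io
    toy_protocol = io.StringIO(
        "LA_0069 LA_D_1047731 - - bonafide\n"
        "LA_0069 LA_D_1105538 - A01 spoof\n")
    labels = protocol_parse(toy_protocol)
    assert labels == {'LA_D_1047731': 1, 'LA_D_1105538': 0}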
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
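# ----------------------------------------------------------------------
# Note (added for illustration): the forward above returns
# log10(y^2 + eps), a floored log-power of the filter-bank output y.
# A quick numeric sanity check of the formula alone (no model needed):
# ----------------------------------------------------------------------
def _demo_log_power_floor():
    eps = torch.finfo(torch.float32).eps
    y = torch.tensor([0.0, 1e-3, 1.0])
    out = torch.log10(torch.pow(y, 2) + eps)
    # the eps floor keeps the log finite even for exact zeros
    assert torch.isfinite(out).all()
    return out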
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio can be used to resample (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in the log-spectral-amplitude calculation
self.amp_floor = 0.00001
        # truncation length: 10*16*750 = 120000 samples (7.5 s at 16 kHz), i.e., 750 frames at a 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (the input batch is shared across sub-models)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
              input_data will be the [scores, target_vec, flag] list from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
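# ----------------------------------------------------------------------
# Illustrative sketch (added): a triangular, linearly spaced filter
# bank of the kind typically used to initialize the layer above. This
# is an assumption about nii_front_end.linear_fb, not its actual code;
# it mainly shows the expected (fft_n // 2 + 1, filter_num) shape.
# ----------------------------------------------------------------------
def toy_linear_fb(fn, filter_num):
    n_bins = fn // 2 + 1
    # filter centers, linearly spaced over the FFT bins
    centers = torch.linspace(0, n_bins - 1, filter_num + 2)
    fb = torch.zeros(n_bins, filter_num)
    for m in range(filter_num):
        left, center, right = centers[m], centers[m + 1], centers[m + 2]
        for k in range(n_bins):
            if left < k <= center:
                fb[k, m] = (k - left) / (center - left)
            elif center < k < right:
                fb[k, m] = (right - k) / (right - center)
    return fb  # (fft_n // 2 + 1, filter_num)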
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio can be used to resample (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in the log-spectral-amplitude calculation
self.amp_floor = 0.00001
        # truncation length: 10*16*750 = 120000 samples (7.5 s at 16 kHz), i.e., 750 frames at a 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (the input batch is shared across sub-models)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a file
return None
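# ----------------------------------------------------------------------
# Layout note (added for illustration): _compute_embedding stacks the
# per-front-end embeddings block-wise along dim 0. With B = batch size
# and S = v_submodels, rows [i*B : (i+1)*B] belong to sub-model i,
# which is why forward() repeats the target vector S times. Toy sketch:
# ----------------------------------------------------------------------
def _demo_submodel_stacking(batch_size=3, n_submodels=2, emd_dim=4):
    out = torch.zeros(batch_size * n_submodels, emd_dim)
    for idx in range(n_submodels):
        # stand-in for one sub-model's embeddings
        emb = torch.full((batch_size, emd_dim), float(idx))
        out[idx * batch_size:(idx + 1) * batch_size] = emb
    return out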
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
              input_data will be the [scores, target_vec, flag] list from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio can be used to resample (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in the log-spectral-amplitude calculation
self.amp_floor = 0.00001
        # truncation length: 10*16*750 = 120000 samples (7.5 s at 16 kHz), i.e., 750 frames at a 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (the input batch is shared across sub-models)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a file
return None
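# ----------------------------------------------------------------------
# Behaviour note (added for illustration): _compute_score returns
# sigmoid probabilities during training (to match BCELoss) but raw
# logits at inference, where only the ranking of trials matters.
# A toy check of the two branches, mirroring the method above:
# ----------------------------------------------------------------------
def _demo_score_branches():
    logits = torch.tensor([[-1.0], [0.5], [2.0], [-0.3], [1.2], [0.0]])
    train_scores = torch.sigmoid(logits).squeeze(1)   # inference=False
    infer_scores = logits.squeeze(1)                  # inference=True
    # sigmoid is monotonic, so the ranking of trials is unchanged
    assert torch.equal(train_scores.argsort(), infer_scores.argsort())
    return train_scores, infer_scores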
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
              input_data will be the [scores, target_vec, flag] list from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-fixed-sig/02/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio can be used to resample (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in the log-spectral-amplitude calculation
self.amp_floor = 0.00001
        # truncation length: 10*16*750 = 120000 samples (7.5 s at 16 kHz), i.e., 750 frames at a 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 1
# output class
self.v_out_class = 1
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
#self.m_a_softmax = []
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(self.spec_fb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # handle padding and trimming independently for each utterance in the batch
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this utterance
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size per sub-model (the input batch is shared across sub-models)
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_emb = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout rather than written to a file
return None
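# ----------------------------------------------------------------------
# Illustrative sketch (added): the LCNN blocks above rely on
# nii_nn.MaxFeatureMap2D, the max-feature-map activation from the
# Light CNN family. We do not reproduce that class here; the toy
# version below only shows the standard operation it is expected to
# perform: split the channel axis in half and take the element-wise
# maximum, which is why each Conv2d above feeds the next layer half as
# many channels (64 -> 32, 96 -> 48, 128 -> 64).
# ----------------------------------------------------------------------
def toy_max_feature_map(x):
    """ x: (batch, channel, height, width), even channel count assumed """
    half = x.shape[1] // 2
    return torch.max(x[:, :half], x[:, half:])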
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
              input_data will be the [scores, target_vec, flag] list from forward()
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,079 | 34.233645 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
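# ----------------------------------------------------------------------
# Illustrative sketch (added): this model scores trials through
# nii_p2sgrad.P2SActivationLayer. We do not reproduce that class here;
# the toy layer below only illustrates the usual P2SGrad-style output:
# cosine similarity between the length-normalized embedding and one
# learnable class vector per class (spoof / bonafide here).
# ----------------------------------------------------------------------
class ToyCosineLayer(torch_nn.Module):
    def __init__(self, in_dim, out_class):
        super(ToyCosineLayer, self).__init__()
        self.weight = torch_nn.Parameter(torch.randn(in_dim, out_class))
    def forward(self, x):
        # (batch, in_dim) -> (batch, out_class) cosine scores in [-1, 1]
        w = torch_nn_func.normalize(self.weight, dim=0)
        x = torch_nn_func.normalize(x, dim=1)
        return torch.mm(x, w)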
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio can be used to resample (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (number of linear filter-bank channels)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor used in the log-spectral-amplitude calculation
self.amp_floor = 0.00001
        # truncation length: 10*16*750 = 120000 samples (7.5 s at 16 kHz), i.e., 750 frames at a 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.ones([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
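    # Shape sketch for _front_end (hypothetical lengths): with trunc_len=750,
    # a 300-frame file is tiled ceil(750/300) = 3 times and cut back to 750
    # frames, while a 900-frame file keeps a random 750-frame segment.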
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
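    # Layout note: rows [i*batch : (i+1)*batch] of output_emb hold the
    # embeddings from sub-model i; with the default single front-end this
    # is simply (batchsize, v_emd_dim).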
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores from each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the bona fide (positive) class
return out_score[:, 1]
else:
return out_score
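    # Note: at inference time only the bona fide column is returned, so a
    # larger score means the trial looks more bona fide to this model.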
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
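# Minimal wiring sketch (hypothetical tensors and args; in this repo the
# training script normally constructs Model and Loss):
#   model = Model(in_dim=1, out_dim=1, args=None)
#   outputs = model(wav_batch, fileinfo)  # training mode -> [scores, targets, True]
#   loss = Loss(None).compute(outputs, None)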
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
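# Usage sketch (hypothetical file name and trial ID; the ASVspoof2019 row
# format "SPEAKER TRIAL - ATTACK KEY" is assumed):
#   parser = protocol_parse('protocol_la.txt')
#   parser['LA_D_1000000']  # -> 1 for a bonafide trial, 0 for a spoof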
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio could be used for resampling (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (base component, before optional delta features)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor value used when calculating log spectrum amplitude
self.amp_floor = 0.00001
        # number of frames kept per trial: 10 * 16 * 750 // frame_hop,
        # i.e., 750 frames (7.5 s at 16 kHz) for the 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
            # default: identity normalization (std must be 1, not 0,
            # otherwise normalize_input() would divide by zero)
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores from each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the bona fide (positive) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio could be used for resampling (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (base component, before optional delta features)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor value used when calculating log spectrum amplitude
self.amp_floor = 0.00001
        # number of frames kept per trial: 10 * 16 * 750 // frame_hop,
        # i.e., 750 frames (7.5 s at 16 kHz) for the 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
            # default: identity normalization (std must be 1, not 0,
            # otherwise normalize_input() would divide by zero)
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores from each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the bona fide (positive) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio could be used for resampling (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (base component, before optional delta features)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor value used when calculating log spectrum amplitude
self.amp_floor = 0.00001
        # number of frames kept per trial: 10 * 16 * 750 // frame_hop,
        # i.e., 750 frames (7.5 s at 16 kHz) for the 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
            # default: identity normalization (std must be 1, not 0,
            # otherwise normalize_input() would divide by zero)
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores from each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the bona fide (positive) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio could be used for resampling (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (base component, before optional delta features)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor value used when calculating log spectrum amplitude
self.amp_floor = 0.00001
        # number of frames kept per trial: 10 * 16 * 750 // frame_hop,
        # i.e., 750 frames (7.5 s at 16 kHz) for the 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
            # default: identity normalization (std must be 1, not 0,
            # otherwise normalize_input() would divide by zero)
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute the embedding from each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores from each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the bona fide (positive) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-fixed-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.p2sgrad as nii_p2sgrad
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
## a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
# target data
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
        # working sampling rate; torchaudio could be used for resampling (optional, see below)
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
        # LFB dim (base component, before optional delta features)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor value used when calculating log spectrum amplitude
self.amp_floor = 0.00001
        # number of frames kept per trial: 10 * 16 * 750 // frame_hop,
        # i.e., 750 frames (7.5 s at 16 kHz) for the 160-point hop
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output class
self.v_out_class = 2
self.m_transform = []
self.m_output_act = []
self.m_frontend = []
self.m_angle = []
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfb_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
            # default: identity normalization (std must be 1, not 0,
            # otherwise normalize_input() would divide by zero)
            in_m = torch.zeros([in_dim])
            in_s = torch.ones([in_dim])
            out_m = torch.zeros([out_dim])
            out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
fs: frame shift
fl: frame length
fn: fft points
trunc_len: number of frames per file (by truncating)
datalength: original length of data
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, fft_bin, frame_length)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
            # for each file in the batch, handle padding and trimming independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly the number of frames in this file
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_length, fft_bin)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
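# Illustrative note on the trim/pad logic above (toy numbers): with
# trunc_len=300, a 500-frame utterance is cropped at a random 300-frame
# window, while a 100-frame utterance is tiled ceil(300/100)=3 times and
# cut back to exactly 300 frames.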
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output embedding from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (stft spectrogram)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros([batch_size * self.v_submodels,
self.v_out_class],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,262 | 34.331019 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
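# Illustrative protocol row (ASVspoof2019 LA style); row[1] is the trial
# name and row[-1] the key, so the line
#   LA_0069 LA_D_1105538 - - bonafide
# yields data_buffer['LA_D_1105538'] == 1.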
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
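# Note on the channel counts above: nii_nn.MaxFeatureMap2D halves the
# channel dimension (element-wise max over two halves), so every Conv2d
# emits twice the channels consumed by the following layer, e.g.
# Conv2d(1, 64) -> MFM -> 32 channels into the next Conv2d(32, 64).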
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
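# Shape walk-through for the loop above (default 60-dim LFB front-end):
# (batch, 1, frame, 60) -> LCNN with four 2x2 max-pools
# -> (batch, 32, frame//16, 60//16) -> permute/view
# -> (batch, frame//16, 96) -> residual BLSTM + sum over time
# -> Linear -> (batch, 64) embedding.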
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
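# At inference only out_score_neg (the first tensor returned by each
# OCAngleLayer) is kept and used as the per-trial decision score.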
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,561 | 33.970787 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
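# The two BLSTM layers keep the input feature size ((lfb_dim//16) * 32),
# which is what allows the residual sum
# (hidden_features_lstm + hidden_features) in _compute_embedding below.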
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,561 | 33.970787 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
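# OCAngleLayer realises one-class softmax: a single learned direction in
# the 64-dim embedding space with angular margins, hence
# self.v_out_class = 1 rather than a two-class softmax output.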
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,561 | 33.970787 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
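# The LFB front-end runs under torch.no_grad(), i.e. it is treated as a
# fixed feature extractor and no gradient flows into the filter-bank.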
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
# compute scores for each sub-models
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,561 | 33.970787 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the front-end / sub-model to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size for each sub-model
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
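    # Row layout of output_emb: with batch size B per sub-model, rows
    # [idx * B, (idx + 1) * B) hold the embeddings from sub-model idx,
    # i.e. sub-model 0 first, then sub-model 1, and so on.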
def _compute_score(self, x, inference=False):
"""
"""
        # batch size for each sub-model
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # return None so that the script does not save scores to files
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
           input_data is a list [scores, target_vec, flag]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-lstmsum-oc/02/model.py
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
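# Example (hypothetical rows in the ASVspoof2019 LA protocol format):
# LA_0069 LA_T_1138215 - - bonafide
# LA_0069 LA_T_1271820 - A01 spoof
# protocol_parse on such a file returns
# {'LA_T_1138215': 1, 'LA_T_1271820': 0}.
# Caveat: np.loadtxt returns a 1-D array for a single-row file, in which
# case the loop above would iterate over columns rather than rows.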
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part: angle-based scoring layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
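            # Note on the channel bookkeeping above (an inferred property
            # of nii_nn.MaxFeatureMap2D): each MFM halves the channel
            # count (64->32, 96->48, 128->64), and the four 2x2 max-pooling
            # layers shrink the feature axis by a factor of 16, which is
            # where the (lfb_dim // 16) * 32 size of the layers below
            # comes from.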
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32),
nii_nn.BLSTMLayer((lfb_dim//16) * 32, (lfb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the front-end / sub-model to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size for each sub-model
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # batch size for each sub-model
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
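    # The two buffers follow the one-class softmax formulation: the angle
    # layer is assumed to return a margin-adjusted angular score for the
    # non-target branch (index 0) and for the target branch (index 1).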
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # return None so that the script does not save scores to files
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
           input_data is a list [scores, target_vec, flag]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/05/model.py
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part: angle-based scoring layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
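            # The factor of 2 in the Linear input size reflects an
            # assumption about nii_nn.SelfWeightedPooling: it is expected
            # to concatenate an attention-weighted mean and standard
            # deviation, doubling the feature dimension.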
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the front-end / sub-model to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of input embedding vectors
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # return None so that the script does not save scores to files
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/04/model.py
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part: angle-based scoring layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the front-end / sub-model to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of input embedding vectors
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # return None so that the script does not save scores to files
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/01/model.py
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part: angle-based scoring layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the front-end / sub-model to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # number of input embedding vectors
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # return None so that the script does not save scores to files
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/06/model.py
#!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part: angle-based scoring layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute embeddings with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
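    # shape walkthrough (a sketch, assuming lfb_dim = 60 and 100 frames):
    #   x_sp_amp:        (batch, 100, 60)
    #   after unsqueeze: (batch, 1, 100, 60)
    #   after m_trans:   (batch, 32, 6, 3)  # four 2x2 max-pools -> //16
    #   after view:      (batch, 6, 96)     # 96 = (lfb_dim // 16) * 32
    #   after m_pool:    (batch, 192)       # assuming mean+std concat,
    #                                       # matching Linear's 96 * 2 input
    #   after m_output:  (batch, 64)        # v_emd_dim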
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,863 | 33.487239 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
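# example (hypothetical ASVspoof2019-style protocol line):
#   LA_0079 LA_T_1138215 - - bonafide
# protocol_parse would map 'LA_T_1138215' -> 1; any other value in the
# last column marks the trial as spoof and maps it to 0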
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part used in training
        self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute embeddings with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
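            # e.g. batch targets [1, 0] with v_submodels = 2 are tiled to
            # [1, 0, 1, 0], matching the stacking order of the scores
            # ([sub-model 0 batch, sub-model 1 batch, ...])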
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,863 | 33.487239 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/lfb-lcnn-attention-p2s/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFB dim (base component)
self.lfb_dim = [60]
self.lfb_with_delta = False
# window type
self.win = torch.hann_window
        # floor for log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
        # final part used in training
        self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfb_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfb_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfb_with_delta:
lfb_dim = lfb_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((lfb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((lfb_dim // 16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFB(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfb_dim[idx],
with_energy=False,
with_emphasis=True,
with_delta=self.lfb_with_delta)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model / front-end to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
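    # (the front-end above runs under torch.no_grad, so the LFB acts as
    # a fixed feature extractor; gradients flow only through the CNN,
    # pooling, and linear layers)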
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute embeddings with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_score = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models
batch_size = x.shape[0]
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
            # out_score[:, 1] corresponds to the positive (bona fide) class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,863 | 33.487239 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
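                # e.g. a 200000-sample input (~12.5 s at 16 kHz) with
                # m_seg_len = 64600 gives num_seq = 3 scored segments;
                # the trailing 6200 samples are discarded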
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
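    # (with the default in_m = zeros and in_s = ones, i.e. when no
    # mean_std is supplied, normalize_input is the identity map)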
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/01/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
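    # (score[-1] picks the last of the m_num_class = 2 scores, i.e. class
    # index 1, assumed to be the bona fide / positive class, consistent
    # with the 1 = bonafide convention of the protocol labels)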
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/06/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
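# a minimal sketch of how the training loop is assumed to call the wrapper:
#   model = Model(in_dim, out_dim, args, prj_conf)
#   outputs = model(x, fileinfo)   # [logprob, target_vec, True] in training
#   loss = Loss(args).compute(outputs, target=None)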
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/03/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
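    # (assumed RawNet topology behind these hyper-parameters: a learnable
    # filterbank front-end with 20 filters of length 1025, residual conv
    # blocks with 128 and 512 channels, a 3-layer GRU with 256 nodes, a
    # 64-dim utterance embedding, and a 2-class output)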
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/rawnet2/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.block_rawnet as nii_rawnet
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim,args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_pth = prj_conf.optional_argument[0]
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_pth)
# Model
self.m_filter_num = 20
self.m_filter_len = 1025
self.m_res_ch_1 = 128
self.m_res_ch_2 = 512
self.m_gru_node = 256
self.m_gru_layer = 3
self.m_emb_dim = 64
self.m_num_class = 2
self.m_rawnet = nii_rawnet.RawNet(
self.m_filter_num, self.m_filter_len,
in_dim, prj_conf.wav_samp_rate,
self.m_res_ch_1, self.m_res_ch_2,
self.m_gru_node, self.m_gru_layer,
self.m_emb_dim, self.m_num_class
)
        # segment length (number of waveform sampling points per segment)
self.m_seg_len = prj_conf.truncate_seq
if self.m_seg_len is None:
# use default segment length
self.m_seg_len = 64600
print("RawNet uses a default segment length {:d}".format(
self.m_seg_len))
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
# log prob
logprob = self.m_rawnet.forward(x)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(
target, device=x.device, dtype=torch.long)
return [logprob, target_vec, True]
else:
if x.shape[1] < self.m_seg_len:
# too short, no need to split
scores = self.m_rawnet.inference(x)
else:
# split input into segments
num_seq = x.shape[1] // self.m_seg_len
scores = []
for idx in range(num_seq):
stime = idx * self.m_seg_len
etime = idx * self.m_seg_len + self.m_seg_len
scores.append(self.m_rawnet.inference(x[:, stime:etime]))
# average scores
# (batch, num_classes, seg_num) -> (batch, num_classes)
scores = torch.stack(scores, dim=2).mean(dim=-1)
targets = self._get_target(filenames)
for filename, target, score in zip(filenames, targets, scores):
print("Output, %s, %d, %f" % (filename, target, score[-1]))
return None
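# --- illustration only (not part of the original model) ---------------------
# A minimal, self-contained sketch of the segment-averaging inference used in
# Model.forward() above. `scorer` is a hypothetical callable standing in for
# self.m_rawnet.inference, and x is assumed to be (batch, length, ...).
def _segment_average_inference_sketch(x, seg_len, scorer):
    if x.shape[1] < seg_len:
        # shorter than one segment: score the whole input directly
        return scorer(x)
    # split into as many full segments as fit; trailing samples are dropped
    num_seg = x.shape[1] // seg_len
    seg_scores = [scorer(x[:, idx * seg_len : (idx + 1) * seg_len])
                  for idx in range(num_seg)]
    # stack to (batch, num_classes, num_seg), then average over segments
    return torch.stack(seg_scores, dim=2).mean(dim=-1)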
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.CrossEntropyLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
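# --- illustration only (not part of the original project) -------------------
# Hypothetical usage of Loss with dummy tensors shaped like the training-time
# output of Model.forward(): log-probabilities (batch, num_class) and labels
# (batch,) of dtype long. Loss.__init__ ignores args, so None suffices here.
def _loss_usage_sketch():
    logprob = torch.randn(4, 2)            # (batch=4, num_class=2)
    labels = torch.randint(0, 2, (4,))     # (batch,), dtype long
    return Loss(None).compute([logprob, labels, True], None)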
if __name__ == "__main__":
print("Definition of model")
| 6,838 | 32.038647 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/05/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
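# Example (hypothetical rows in the ASVspoof2019 CM protocol style, where
# column 1 is the trial name and the last column is the key):
#   LA_0069 LA_D_1047731 - -   bonafide
#   LA_0069 LA_D_1105538 - A01 spoof
# protocol_parse() would then return
#   {'LA_D_1047731': 1, 'LA_D_1105538': 0}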
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
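# --- illustration only (not part of the original project) -------------------
# TrainableLinearFb computes, in effect, y = log10((x W)^2 + eps): a linear
# projection whose weight is initialized from a linear-frequency filter bank
# but remains trainable, followed by squared magnitude and a floored log10.
# A functional sketch, assuming the filter-bank matrix multiplies on the
# right and `fb_weight` is a hypothetical (fft_bins, filter_num) tensor:
def _trainable_linear_fb_sketch(x_spec, fb_weight):
    # x_spec: (batch, frame_num, fft_bins) amplitude spectrogram
    eps = torch.finfo(torch.float32).eps
    return torch.log10(torch.matmul(x_spec, fb_weight) ** 2 + eps)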
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
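            # Note: MaxFeatureMap2D halves the channel count by taking an
            # element-wise max over the two halves of the channel axis; that
            # is why a Conv2d with 64 output channels feeds layers expecting
            # 32 input channels (and 96 -> 48, 128 -> 64 above).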
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
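    # Rough shape walk-through for the default single front-end (assuming a
    # hypothetical batch of 4 trials): spectrogram (4, T, 257) -> unsqueeze
    # (4, 1, T, 257) -> trainable linear FB (4, 1, T, 60) -> LCNN with four
    # 2x2 max-pools (4, 32, T//16, 60//16) -> flatten (4, T//16, 32 * 3) ->
    # attentive pooling (4, 2 * 96) -> Linear -> embedding (4, 64).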
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
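# --- illustration only -------------------------------------------------------
# forward() I/O at a glance: in training mode it returns
#   [(neg_scores, pos_scores), target_vec]
# which Loss below feeds to OCSoftmaxWithLoss; in evaluation mode it prints an
# "Output, <trial>, <label>, <score>" line that the scoring scripts parse from
# stdout (evaluation is assumed to run with batch size 1) and returns None.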
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/04/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/01/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/06/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to store output scores from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
# number of sub models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores for each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/03/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
        # floor used in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
        # it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
            # 3. (batch, channel, frame//N, feat_dim//N) ->
            #    (batch, frame//N, channel * feat_dim//N)
            #    where the downsampling factor N comes from max-pooling
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
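    # ------------------------------------------------------------------
    # Worked shape example for _compute_embedding (a sketch, assuming a
    # 4 s waveform at 16 kHz, i.e. 64000 samples, batch size B):
    #   front-end (frame 320, hop 160, fb dim 60):  (B, ~400, 60)
    #   after unsqueeze:                            (B, 1, ~400, 60)
    #   four [2, 2] max-pooling layers divide both axes by 16 (floored):
    #     frames ~400 -> 25, fb dim 60 -> 3; 32 channels after last MFM
    #   hidden_features after view:                 (B, 25, 3 * 32 = 96)
    #   SelfWeightedPooling -> (B, 192) (assuming the mean+std doubling
    #   noted in __init__), Linear -> (B, v_emd_dim = 64)
    # ------------------------------------------------------------------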
def _compute_score(self, x, inference=False):
"""
"""
        # x.shape[0] equals number of sub-models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
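    # ------------------------------------------------------------------
    # Note on scoring: OCAngleLayer computes angle (cosine-similarity)
    # based scores between the embedding and a learned reference
    # direction, following the one-class softmax idea. At inference only
    # out_score_neg is returned and used as the detection score; during
    # training both branches feed OCSoftmaxWithLoss.
    # ------------------------------------------------------------------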
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout (inference batch size is
            # expected to be 1); no separate score file is written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-attention-oc/02/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.oc_softmax as nii_ocsoftmax
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
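# Example: with a hypothetical ASVspoof2019 LA protocol row such as
#   LA_0069 LA_D_1105538 - - bonafide
# np.loadtxt gives row[1] == 'LA_D_1105538' (trial name) and
# row[-1] == 'bonafide', so
#   protocol_parse(path)['LA_D_1105538'] == 1
# (speaker and trial IDs above are made up for illustration)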
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
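# TrainableLinearFb thus computes log10(FB(x)^2 + eps): the squared
# output of a linear filter-bank projection (initialized by
# nii_front_end.linear_fb, then trained), compressed with log10; the
# float32 machine-epsilon term keeps the log finite when a filter
# output is exactly zero.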
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
# self.model_debug = False
# self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# spectrogram dim (base component)
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# pooling layer
self.m_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part for output layer
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
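            # note on channel bookkeeping: MaxFeatureMap2D splits the
            # channel axis into two halves and takes their element-wise
            # maximum, halving the channel count; hence each Conv2d
            # above produces twice the channels consumed by the next
            # layer (e.g. Conv2d(1, 64) -> MFM -> 32 -> Conv2d(32, 64))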
self.m_pooling.append(
nii_nn.SelfWeightedPooling((self.spec_fb_dim // 16) * 32)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim//16) * 32 * 2, self.v_emd_dim)
)
self.m_angle.append(
nii_ocsoftmax.OCAngleLayer(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_pooling = torch_nn.ModuleList(self.m_pooling)
self.m_angle = torch_nn.ModuleList(self.m_angle)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model (front-end configuration) to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# batch_size
batch_size = x.shape[0] // self.v_submodels
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([x.shape[0], self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
            # 3. (batch, channel, frame//N, feat_dim//N) ->
            #    (batch, frame//N, channel * feat_dim//N)
            #    where the downsampling factor N comes from max-pooling
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pooling
hidden_features = m_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output(hidden_features)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # x.shape[0] equals number of sub-models * batch_size
batch_size = x.shape[0] // self.v_submodels
# buffer to save the scores
# for non-target classes
out_score_neg = torch.zeros(
[x.shape[0], self.v_out_class], device=x.device, dtype=x.dtype)
# for target classes
out_score_pos = torch.zeros_like(out_score_neg)
        # compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp_score = m_score(x[s_idx:e_idx], inference)
out_score_neg[s_idx:e_idx] = tmp_score[0]
out_score_pos[s_idx:e_idx] = tmp_score[1]
if inference:
return out_score_neg
else:
return out_score_neg, out_score_pos
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout (inference batch size is
            # expected to be 1); no separate score file is written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_ocsoftmax.OCSoftmaxWithLoss()
def compute(self, input_data, target):
"""loss = compute(input_data, target_data)
Note:
1. input_data will be the output from Model.forward()
input_data will be a tuple of [scores, target_vec]
2. we will not use target given by the system script
we will use the target_vec in input_data[1]
"""
loss = self.m_loss(input_data[0], input_data[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,526 | 33.504444 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-p2s/05/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
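            # the two stacked BLSTM layers keep the feature dimension
            # unchanged ((spec_fb_dim // 16) * 32 in and out), which
            # allows the residual connection "hidden_features_lstm +
            # hidden_features" used later in _compute_embedding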
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model (front-end configuration) to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size of the input waveforms
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
            # 3. (batch, channel, frame//N, feat_dim//N) ->
            #    (batch, frame//N, channel * feat_dim//N)
            #    where the downsampling factor N comes from max-pooling
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
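    # ------------------------------------------------------------------
    # Pooling in this variant: the BLSTM output is added back to its
    # input (a residual connection) and the sum is pooled over frames,
    # giving one utterance-level vector per trial. For fb dim 60:
    #   hidden_features:                 (batch, frame_num, 96)
    #   (lstm + residual).sum(1):        (batch, 96)
    #   Linear:                          (batch, v_emd_dim = 64)
    # ------------------------------------------------------------------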
def _compute_score(self, x, inference=False):
"""
"""
        # batch size per sub-model (x.shape[0] = batch * num_submodels)
        batch_size = x.shape[0] // self.v_submodels
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
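    # ------------------------------------------------------------------
    # Note on scoring: P2SActivationLayer outputs per-class cosine
    # similarities in [-1, 1] between the embedding and learned class
    # vectors; column 1 corresponds to the bona fide class. During
    # training these cosines are matched against one-hot targets by
    # P2SGradLoss, an MSE-style loss following the P2SGrad formulation.
    # ------------------------------------------------------------------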
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout (inference batch size is
            # expected to be 1); no separate score file is written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,414 | 33.640449 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/03-asvspoof-mega/spec2-lcnn-lstmsum-p2s/04/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_modules.p2sgrad as nii_p2sgrad
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class TrainableLinearFb(nii_nn.LinearInitialized):
"""Linear layer initialized with linear filter bank
"""
def __init__(self, fn, sr, filter_num):
super(TrainableLinearFb, self).__init__(
nii_front_end.linear_fb(fn, sr, filter_num))
return
def forward(self, x):
return torch.log10(
torch.pow(super(TrainableLinearFb, self).forward(x), 2) +
torch.finfo(torch.float32).eps)
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
self.spec_with_delta = False
self.spec_fb_dim = 60
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
self.v_emd_dim = 64
# output classes
self.v_out_class = 2
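        # with the values above at 16 kHz: hop 160 = 10 ms, frame
        # 320 = 20 ms, FFT 512 -> 257 linear bins, projected onto a
        # 60-dim trainable filter bank; two cosine-score classes
        # (spoof vs bona fide) are computed on 64-dim embeddings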
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# final part on training
self.m_angle = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n) in enumerate(zip(
self.v_truncate_lens, self.fft_n)):
fft_n_bins = fft_n // 2 + 1
self.m_transform.append(
torch_nn.Sequential(
TrainableLinearFb(fft_n,self.m_target_sr,self.spec_fb_dim),
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
                torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32),
nii_nn.BLSTMLayer((self.spec_fb_dim//16) * 32,
(self.spec_fb_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((self.spec_fb_dim // 16) * 32, self.v_emd_dim)
)
self.m_angle.append(
nii_p2sgrad.P2SActivationLayer(self.v_emd_dim, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.Spectrogram(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_angle = torch_nn.ModuleList(self.m_angle)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
          idx: index of the sub-model (front-end configuration) to use
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
        # batch size of the input waveforms
batch_size = x.shape[0]
        # buffer to store output embeddings from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
        # compute an embedding with each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
            # 3. (batch, channel, frame//N, feat_dim//N) ->
            #    (batch, frame//N, channel * feat_dim//N)
            #    where the downsampling factor N comes from max-pooling
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
# 4. pass through LSTM then summing
hidden_features_lstm = m_be_pool(hidden_features)
# 5. pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).sum(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, x, inference=False):
"""
"""
        # batch size per sub-model (x.shape[0] = batch * num_submodels)
        batch_size = x.shape[0] // self.v_submodels
# compute score through p2sgrad layer
out_score = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=x.device, dtype=x.dtype)
        # compute scores with each sub-model
for idx, m_score in enumerate(self.m_angle):
tmp_score = m_score(x[idx * batch_size : (idx+1) * batch_size])
out_score[idx * batch_size : (idx+1) * batch_size] = tmp_score
if inference:
# output_score [:, 1] corresponds to the positive class
return out_score[:, 1]
else:
return out_score
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to stdout (inference batch size is
            # expected to be 1); no separate score file is written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_p2sgrad.P2SGradLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,414 | 33.640449 | 80 | py |