repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
GreedyAC | GreedyAC-master/agent/nonlinear/SACDiscrete.py | #!/usr/bin/env python3
# Import modules
import os
from gym.spaces import Box
import torch
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
from agent.baseAgent import BaseAgent
import agent.nonlinear.nn_utils as nn_utils
from agent.nonlinear.policy.MLP import Softmax
from agent.nonlinear.value_function.MLP import DoubleQ, Q
from utils.experience_replay import TorchBuffer as ExperienceReplay
class SACDiscrete(BaseAgent):
def __init__(self, env, gamma, tau, alpha, policy, target_update_interval,
critic_lr, actor_lr_scale, actor_hidden_dim,
critic_hidden_dim, replay_capacity, seed, batch_size, betas,
double_q=True, soft_q=True, cuda=False, clip_stddev=1000,
init=None, activation="relu"):
"""
Constructor
Parameters
----------
env : gym.Environment
The environment to run on
gamma : float
The discount factor
tau : float
The weight of the weighted average, which performs the soft update
to the target critic network's parameters toward the critic
network's parameters, that is: target_parameters =
((1 - τ) * target_parameters) + (τ * source_parameters)
alpha : float
The entropy regularization temperature. See equation (1) in paper.
policy : str
The type of policy; currently only "softmax" is supported
target_update_interval : int
The number of updates to perform before the target critic network
is updated toward the critic network
critic_lr : float
The critic learning rate
actor_lr_scale : float
    The multiple of the critic learning rate used as the actor learning
    rate, i.e. actor_lr = actor_lr_scale * critic_lr
actor_hidden_dim : int
The number of hidden units in the actor's neural network
critic_hidden_dim : int
The number of hidden units in the critic's neural network
replay_capacity : int
The number of transitions stored in the replay buffer
seed : int
The random seed so that random samples of batches are repeatable
batch_size : int
The number of elements in a batch for the batch update
cuda : bool, optional
Whether or not cuda should be used for training, by default False.
Note that if True, cuda is only utilized if available.
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
Raises
------
ValueError
If the batch size is larger than the replay buffer
"""
action_space = env.action_space
obs_space = env.observation_space
if isinstance(action_space, Box):
raise ValueError("SACDiscrete can only be used with " +
"discrete actions")
super().__init__()
self.batch = True
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.is_training = True
self.gamma = gamma
self.tau = tau
self.alpha = alpha
self.double_q = double_q
self.soft_q = soft_q
self.num_actions = action_space.n
self.device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
# Keep a replay buffer
action_shape = 1
obs_dim = obs_space.shape
self.replay = ExperienceReplay(replay_capacity, seed, obs_dim,
action_shape, self.device)
self.batch_size = batch_size
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self.target_update_interval = target_update_interval
self.update_number = 0
num_inputs = obs_space.shape[0]
self._init_critic(obs_space, critic_hidden_dim, init, activation,
critic_lr, betas)
self.policy_type = policy.lower()
self._init_policy(obs_space, action_space, actor_hidden_dim, init,
activation, actor_lr_scale * critic_lr, betas,
clip_stddev)
def sample_action(self, state):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.is_training:
action, _, _ = self.policy.sample(state)
act = action.detach().cpu().numpy()[0]
return int(act[0])
else:
_, log_prob, _ = self.policy.sample(state)
return log_prob.argmax().item()
def update(self, state, action, reward, next_state, done_mask):
# Adjust action to ensure it can be sent to the experience replay
# buffer properly
action = np.array([action])
# Keep transition in replay buffer
self.replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self.replay.sample(batch_size=self.batch_size)
if state_batch is None:
# Not enough samples in buffer
return
self._update_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
self._update_actor(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
def reset(self):
pass
def eval(self):
self.is_training = False
def train(self):
self.is_training = True
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
def load_model(self, actor_path, critic_path):
pass
def get_parameters(self):
pass
def _init_critic(self, obs_space, critic_hidden_dim, init,
activation, critic_lr, betas):
"""
Initializes the critic
"""
num_inputs = obs_space.shape[0]
if self.double_q:
critic_type = DoubleQ
else:
critic_type = Q
self.critic = critic_type(num_inputs, 1, critic_hidden_dim, init,
activation).to(device=self.device)
self.critic_target = critic_type(num_inputs, 1, critic_hidden_dim,
init, activation).to(self.device)
# Ensure critic and target critic share the same parameters at the
# beginning of training
nn_utils.hard_update(self.critic_target, self.critic)
self.critic_optim = Adam(
self.critic.parameters(),
lr=critic_lr,
betas=betas,
)
def _init_policy(self, obs_space, action_space, actor_hidden_dim, init,
activation, actor_lr, betas, clip_stddev):
"""
Initializes the policy
"""
num_inputs = obs_space.shape[0]
num_actions = action_space.n
if self.policy_type == "softmax":
self.policy = Softmax(num_inputs, num_actions, actor_hidden_dim,
activation, init).to(self.device)
else:
raise NotImplementedError(f"policy {self.policy_type} unknown")
self.policy_optim = Adam(self.policy.parameters(), lr=actor_lr,
betas=betas)
def _update_critic(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
if self.double_q:
self._update_double_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch,)
else:
self._update_single_critic(state_batch, action_batch, reward_batch,
next_state_batch, mask_batch)
# Increment the running total of updates and update the critic target
# if needed
self.update_number += 1
if self.update_number % self.target_update_interval == 0:
self.update_number = 0
nn_utils.soft_update(self.critic_target, self.critic, self.tau)
def _update_double_critic(self, state_batch, action_batch,
reward_batch, next_state_batch, mask_batch):
"""
Update the critic using a batch of transitions when using a double Q
critic.
"""
if not self.double_q:
raise ValueError("cannot call _update_single_critic when using " +
"a double Q critic")
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
next_state_action, next_state_log_pi, _ = \
self.policy.sample(next_state_batch)
next_q1, next_q2 = self.critic_target(
next_state_batch,
next_state_action,
)
next_q = torch.min(next_q1, next_q2)
if self.soft_q:
next_q -= self.alpha * next_state_log_pi
q_target = reward_batch + mask_batch * self.gamma * next_q
q1, q2 = self.critic(state_batch, action_batch)
# Calculate the losses on each critic
# JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
q1_loss = F.mse_loss(q1, q_target)
# JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
q2_loss = F.mse_loss(q2, q_target)
q_loss = q1_loss + q2_loss
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
def _update_single_critic(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
"""
Update the critic using a batch of transitions when using a single Q
critic.
"""
if self.double_q:
raise ValueError("cannot call _update_single_critic when using " +
"a double Q critic")
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
next_state_action, next_state_log_pi, _ = \
self.policy.sample(next_state_batch)
next_q = self.critic_target(next_state_batch, next_state_action)
if self.soft_q:
next_q -= self.alpha * next_state_log_pi
q_target = reward_batch + mask_batch * self.gamma * next_q
q = self.critic(state_batch, action_batch)
q_loss = F.mse_loss(q, q_target)
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
def _get_q(self, state_batch, action_batch):
"""
Gets the Q values for `action_batch` actions in `state_batch` states
from the critic, rather than the target critic.
Parameters
----------
state_batch : torch.Tensor
The batch of states to calculate the action values in. Of the form
(batch_size, state_dims).
action_batch : torch.Tensor
The batch of actions to calculate the action values of in each
state. Of the form (batch_size, action_dims).
"""
if self.double_q:
q1, q2 = self.critic(state_batch, action_batch)
return torch.min(q1, q2)
else:
return self.critic(state_batch, action_batch)
def _update_actor(self, state_batch, action_batch, reward_batch,
next_state_batch, mask_batch):
# Calculate the actor loss using Eqn(5) in FKL/RKL paper
# Repeat the state for each action
state_batch = state_batch.repeat_interleave(self.num_actions, dim=0)
actions = torch.tensor([n for n in range(self.num_actions)])
actions = actions.repeat(self.batch_size)
actions = actions.unsqueeze(-1)
with torch.no_grad():
q = self._get_q(state_batch, actions)
log_prob = self.policy.log_prob(state_batch, actions)
prob = log_prob.exp()
with torch.no_grad():
scale = q - log_prob * self.alpha
policy_loss = prob * scale
policy_loss = policy_loss.reshape([self.batch_size, self.num_actions])
policy_loss = -policy_loss.sum(dim=1).mean()
# Update the actor
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
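def _example_discrete_actor_loss():
    # Illustrative sketch only (not part of the original agent): a
    # self-contained numerical example of the discrete actor objective used
    # in _update_actor above, -E_{a~pi}[Q(s, a) - alpha * log pi(a|s)],
    # with a hypothetical batch size, action count, and stand-in tensors.
    batch_size, num_actions, alpha = 4, 3, 0.1
    logits = torch.randn(batch_size, num_actions)    # stand-in actor outputs
    q_values = torch.randn(batch_size, num_actions)  # stand-in critic values
    log_prob = F.log_softmax(logits, dim=-1)
    prob = log_prob.exp()
    # The scale term is treated as a constant w.r.t. the actor parameters,
    # mirroring the torch.no_grad() blocks in _update_actor
    scale = (q_values - alpha * log_prob).detach()
    return -(prob * scale).sum(dim=1).mean()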
| 13,490 | 36.475 | 79 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/VAC.py | # Import modules
import torch
import inspect
from gym.spaces import Box, Discrete
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
from agent.baseAgent import BaseAgent
import agent.nonlinear.nn_utils as nn_utils
from agent.nonlinear.policy.MLP import Gaussian
from agent.nonlinear.value_function.MLP import Q as QMLP
from utils.experience_replay import TorchBuffer as ExperienceReplay
class VAC(BaseAgent):
"""
VAC implements the Vanilla Actor-Critic agent
"""
def __init__(self, num_inputs, action_space, gamma, tau, alpha, policy,
target_update_interval, critic_lr, actor_lr_scale,
num_samples, actor_hidden_dim, critic_hidden_dim,
replay_capacity, seed, batch_size, betas, env, cuda=False,
clip_stddev=1000, init=None, activation="relu"):
"""
Constructor
Parameters
----------
num_inputs : int
The number of input features
action_space : gym.spaces.Space
The action space from the gym environment
gamma : float
The discount factor
tau : float
The weight of the weighted average, which performs the soft update
to the target critic network's parameters toward the critic
network's parameters, that is: target_parameters =
((1 - τ) * target_parameters) + (τ * source_parameters)
alpha : float
The entropy regularization temperature. See equation (1) in paper.
policy : str
The type of policy, currently, only support "gaussian"
target_update_interval : int
The number of updates to perform before the target critic network
is updated toward the critic network
critic_lr : float
The critic learning rate
actor_lr_scale : float
    The multiple of the critic learning rate used as the actor learning
    rate, i.e. actor_lr = actor_lr_scale * critic_lr
num_samples : int
    The number of actions sampled per state to estimate the critic
    baseline and the entropy term
actor_hidden_dim : int
The number of hidden units in the actor's neural network
critic_hidden_dim : int
The number of hidden units in the critic's neural network
replay_capacity : int
The number of transitions stored in the replay buffer
seed : int
The random seed so that random samples of batches are repeatable
batch_size : int
The number of elements in a batch for the batch update
cuda : bool, optional
Whether or not cuda should be used for training, by default False.
Note that if True, cuda is only utilized if available.
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
Raises
------
ValueError
If the batch size is larger than the replay buffer
"""
super().__init__()
self.batch = True
# Ensure batch size < replay capacity
if batch_size > replay_capacity:
raise ValueError("cannot have a batch larger than replay " +
"buffer capacity")
# Set the seed for all random number generators, this includes
# everything used by PyTorch, including setting the initial weights
# of networks. PyTorch prefers seeds with many non-zero binary units
self.torch_rng = torch.manual_seed(seed)
self.rng = np.random.default_rng(seed)
self.is_training = True
self.gamma = gamma
self.tau = tau
self.alpha = alpha
self.action_space = action_space
if not isinstance(action_space, Box):
raise ValueError("VAC only works with Box action spaces")
self.state_dims = num_inputs
self.num_samples = num_samples - 1
assert num_samples >= 2
self.device = torch.device("cuda:0" if cuda and
torch.cuda.is_available() else "cpu")
if isinstance(action_space, Box):
self.action_dims = action_space.high.shape[0]
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed,
(num_inputs,),
action_space.shape[0], self.device)
elif isinstance(action_space, Discrete):
self.action_dims = 1
# Keep a replay buffer
self.replay = ExperienceReplay(replay_capacity, seed, num_inputs,
1, self.device)
self.batch_size = batch_size
# Set the interval between timesteps when the target network should be
# updated and keep a running total of update number
self.target_update_interval = target_update_interval
self.update_number = 0
# Create the critic Q function
if isinstance(action_space, Box):
action_shape = action_space.shape[0]
elif isinstance(action_space, Discrete):
action_shape = 1
self.critic = QMLP(num_inputs, action_shape, critic_hidden_dim,
init, activation).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr,
betas=betas)
self.critic_target = QMLP(num_inputs, action_shape,
critic_hidden_dim, init, activation).to(
self.device)
nn_utils.hard_update(self.critic_target, self.critic)
self.policy_type = policy.lower()
actor_lr = actor_lr_scale * critic_lr
if self.policy_type == "gaussian":
self.policy = Gaussian(num_inputs, action_space.shape[0],
actor_hidden_dim, activation,
action_space, clip_stddev, init).to(
self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=actor_lr,
betas=betas)
else:
raise NotImplementedError
source = inspect.getsource(inspect.getmodule(inspect.currentframe()))
self.info = {}
self.info = {
"source": source,
}
def sample_action(self, state):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if self.is_training:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
act = action.detach().cpu().numpy()[0]
return act
def update(self, state, action, reward, next_state, done_mask):
# Keep transition in replay buffer
self.replay.push(state, action, reward, next_state, done_mask)
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, \
mask_batch = self.replay.sample(batch_size=self.batch_size)
if state_batch is None:
return
# When updating Q functions, we don't want to backprop through the
# policy and target network parameters
with torch.no_grad():
next_state_action, _, _ = \
self.policy.sample(next_state_batch)
qf_next_value = self.critic_target(next_state_batch,
next_state_action)
q_target = reward_batch + mask_batch * self.gamma * qf_next_value
q_prediction = self.critic(state_batch, action_batch)
q_loss = F.mse_loss(q_prediction, q_target)
# Update the critic
self.critic_optim.zero_grad()
q_loss.backward()
self.critic_optim.step()
# Sample action that the agent would take
pi, _, _ = self.policy.sample(state_batch)
# Calculate the advantage
with torch.no_grad():
q_pi = self.critic(state_batch, pi)
sampled_actions, _, _ = self.policy.sample(state_batch,
self.num_samples)
if self.num_samples == 1:
sampled_actions = sampled_actions.unsqueeze(0)
sampled_actions = torch.permute(sampled_actions, (1, 0, 2))
state_baseline = 0
if self.num_samples > 2:
# Baseline computed with self.num_samples - 1 action
# value estimates
baseline_actions = sampled_actions[:, :-1]
baseline_actions = torch.reshape(baseline_actions,
[-1, self.action_dims])
stacked_s_batch = torch.repeat_interleave(state_batch,
self.num_samples-1,
dim=0)
stacked_s_batch = torch.reshape(stacked_s_batch,
[-1, self.state_dims])
baseline_q_vals = self.critic(stacked_s_batch,
baseline_actions)
baseline_q_vals = torch.reshape(baseline_q_vals,
[self.batch_size,
self.num_samples-1])
state_baseline = baseline_q_vals.mean(axis=1).unsqueeze(1)
advantage = q_pi - state_baseline
# Estimate the entropy from a single sampled action in each state
entropy_actions = sampled_actions[:, -1]
entropy = self.policy.log_prob(state_batch, entropy_actions)
with torch.no_grad():
entropy *= entropy
entropy = -entropy
policy_loss = self.policy.log_prob(state_batch, pi) * advantage
policy_loss = -(policy_loss + (self.alpha * entropy)).mean()
# Update the actor
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
# Update target network
self.update_number += 1
if self.update_number % self.target_update_interval == 0:
self.update_number = 0
nn_utils.soft_update(self.critic_target, self.critic, self.tau)
def reset(self):
pass
def eval(self):
self.is_training = False
def train(self):
self.is_training = True
def save_model(self, env_name, suffix="", actor_path=None,
critic_path=None):
pass
def load_model(self, actor_path, critic_path):
pass
def get_parameters(self):
pass
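def _example_sampled_baseline():
    # Illustrative sketch only (not part of the original agent): a
    # self-contained example of the reshaping used in update() above to form
    # a state-value baseline from several sampled actions per state. All
    # sizes are hypothetical and the critic is replaced by a toy function.
    batch_size, num_samples, state_dims, action_dims = 4, 3, 5, 2
    states = torch.randn(batch_size, state_dims)
    sampled_actions = torch.randn(batch_size, num_samples, action_dims)
    # Repeat each state once per sampled action and flatten both tensors so
    # that row i of the states lines up with row i of the actions
    stacked_states = torch.repeat_interleave(states, num_samples, dim=0)
    flat_actions = sampled_actions.reshape(-1, action_dims)
    # Stand-in for self.critic(stacked_states, flat_actions)
    q_vals = (stacked_states.sum(dim=1, keepdim=True)
              + flat_actions.sum(dim=1, keepdim=True))
    # Average the per-action values back per state to form the baseline
    return q_vals.reshape(batch_size, num_samples).mean(dim=1, keepdim=True)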
| 10,808 | 38.021661 | 78 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/nn_utils.py | # Import modules
import torch
import torch.nn as nn
import numpy as np
def weights_init_(layer, init="kaiming", activation="relu"):
"""
Initializes the weights for a fully connected layer of a neural network.
Parameters
----------
layer : torch.nn.Module
The layer to initialize
init : str
The type of initialization to use, one of 'xavier_uniform',
'xavier_normal', 'uniform', 'normal', 'orthogonal', 'kaiming_uniform',
'default', by default 'kaiming_uniform'.
activation : str
The activation function in use, used to calculate the optimal gain
value.
"""
if "weight" in dir(layer):
gain = torch.nn.init.calculate_gain(activation)
if init == "xavier_uniform":
torch.nn.init.xavier_uniform_(layer.weight, gain=gain)
elif init == "xavier_normal":
torch.nn.init.xavier_normal_(layer.weight, gain=gain)
elif init == "uniform":
torch.nn.init.uniform_(layer.weight) / layer.in_features
elif init == "normal":
torch.nn.init.normal_(layer.weight) / layer.in_features
elif init == "orthogonal":
torch.nn.init.orthogonal_(layer.weight)
elif init == "zeros":
torch.nn.init.zeros_(layer.weight)
elif init == "kaiming_uniform" or init == "default" or init is None:
# PyTorch default
return
else:
raise NotImplementedError(f"init {init} not implemented yet")
if "bias" in dir(layer):
torch.nn.init.constant_(layer.bias, 0)
def soft_update(target, source, tau):
"""
Updates the parameters of the target network towards the parameters of
the source network by a weight average depending on tau. The new
parameters for the target network are:
((1 - τ) * target_parameters) + (τ * source_parameters)
Parameters
----------
target : torch.nn.Module
The target network
source : torch.nn.Module
The source network
tau : float
The weighting for the weighted average
"""
with torch.no_grad():
for target_param, param in zip(target.parameters(),
source.parameters()):
# Use in-place operations mul_ and add_ to avoid
# copying tensor data
target_param.data.mul_(1.0 - tau)
target_param.data.add_(tau * param.data)
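def _example_soft_update():
    # Illustrative sketch only: a minimal numerical check of soft_update with
    # two hypothetical single-layer networks and tau = 0.1. After the call,
    # each target parameter equals (1 - tau) * old_target + tau * source.
    target, source = nn.Linear(2, 2), nn.Linear(2, 2)
    old_weight = target.weight.data.clone()
    soft_update(target, source, tau=0.1)
    expected = 0.9 * old_weight + 0.1 * source.weight.data
    assert torch.allclose(target.weight.data, expected)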
def hard_update(target, source):
"""
Sets the parameters of the target network to the parameters of the
source network. Equivalent to soft_update(target, source, 1)
Parameters
----------
target : torch.nn.Module
The target network
source : torch.nn.Module
The source network
"""
with torch.no_grad():
for target_param, param in zip(target.parameters(),
source.parameters()):
target_param.data.copy_(param.data)
def init_layers(layers, init_scheme):
"""
Initializes the weights for the layers of a neural network.
Parameters
----------
layers : list of nn.Module
The list of layers
init_scheme : str
The type of initialization to use, one of 'xavier_uniform',
'xavier_normal', 'uniform', 'normal', 'orthogonal', by default None.
If None, leaves the default PyTorch initialization.
"""
def fill_weights(layers, init_fn):
for i in range(len(layers)):
init_fn(layers[i].weight)
if init_scheme.lower() == "xavier_uniform":
fill_weights(layers, nn.init.xavier_uniform_)
elif init_scheme.lower() == "xavier_normal":
fill_weights(layers, nn.init.xavier_normal_)
elif init_scheme.lower() == "uniform":
fill_weights(layers, nn.init.uniform_)
elif init_scheme.lower() == "normal":
fill_weights(layers, nn.init.normal_)
elif init_scheme.lower() == "orthogonal":
fill_weights(layers, nn.init.orthogonal_)
elif init_scheme is None:
# Use PyTorch default
return
def _calc_conv_outputs(in_height, in_width, kernel_size, dilation=1, padding=0,
stride=1):
"""
Calculates the output height and width given in input height and width and
the kernel size.
Parameters
----------
in_height : int
The height of the input image
in_width : int
The width of the input image
kernel_size : tuple[int, int] or int
The kernel size
dilation : tuple[int, int] or int
Spacing between kernel elements, by default 1
padding : tuple[int, int] or int
Padding added to all four sides of the input, by default 0
stride : tuple[int, int] or int
Stride of the convolution, by default 1
Returns
-------
tuple[int, int]
The output width and height
"""
# Reshape so that kernel_size, padding, dilation, and stride have one
# element per dimension
if isinstance(kernel_size, int):
kernel_size = [kernel_size] * 2
if isinstance(padding, int):
padding = [padding] * 2
if isinstance(dilation, int):
dilation = [dilation] * 2
if isinstance(stride, int):
stride = [stride] * 2
out_height = in_height + 2 * padding[0] - dilation[0] * (
kernel_size[0] - 1) - 1
out_height //= stride[0]
out_width = in_width + 2 * padding[1] - dilation[1] * (
kernel_size[1] - 1) - 1
out_width //= stride[1]
return out_height + 1, out_width + 1
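def _example_calc_conv_outputs():
    # Illustrative sketch only: compares the output-size formula in
    # _calc_conv_outputs against an actual convolution on a hypothetical
    # 28x28 single-channel input.
    out_h, out_w = _calc_conv_outputs(28, 28, kernel_size=3, padding=1,
                                      stride=2)
    conv = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
    out = conv(torch.zeros(1, 1, 28, 28))
    assert (out.shape[-2], out.shape[-1]) == (out_h, out_w)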
def _get_activation(activation):
"""
Returns an activation operation given a string describing the activation
operation
Parameters
----------
activation : str
The string representation of the activation operation, one of 'relu',
'tanh'
Returns
-------
nn.Module
The activation function
"""
# Set the activation function
if activation.lower() == "relu":
act = nn.ReLU()
elif activation.lower() == "tanh":
act = nn.Tanh()
else:
raise ValueError(f"unknown activation {activation}")
return act
| 6,135 | 31.638298 | 79 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/policy/MLP.py | # Import modules
import torch
import time
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal, Independent
from agent.nonlinear.nn_utils import weights_init_
# Global variables
EPSILON = 1e-6
class SquashedGaussian(nn.Module):
"""
Class SquashedGaussian implements a policy following a squashed
Gaussian distribution in each state, parameterized by an MLP.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, activation,
action_space=None, clip_stddev=1000, init=None):
"""
Constructor
Parameters
----------
num_inputs : int
The number of elements in the state feature vector
num_actions : int
The dimensionality of the action vector
hidden_dim : int
The number of units in each hidden layer of the network
activation : str
The activation function to use, one of 'relu', 'tanh'
action_space : gym.spaces.Space, optional
The action space of the environment, by default None. This argument
is used to ensure that the actions are within the correct scale.
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
"""
super(SquashedGaussian, self).__init__()
self.num_actions = num_actions
# Determine standard deviation clipping
self.clip_stddev = clip_stddev > 0
self.clip_std_threshold = np.log(clip_stddev)
# Set up the layers
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
# Initialize weights
self.apply(lambda module: weights_init_(module, init, activation))
# action rescaling
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation function {activation}")
def forward(self, state):
"""
Performs the forward pass through the network, predicting the mean
and the log standard deviation.
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
Returns
-------
2-tuple of torch.Tensor of float
The mean and log standard deviation of the Gaussian policy in the
argument state
"""
x = self.act(self.linear1(state))
x = self.act(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
if self.clip_stddev:
log_std = torch.clamp(log_std, min=-self.clip_std_threshold,
max=self.clip_std_threshold)
return mean, log_std
def sample(self, state, num_samples=1):
"""
Samples the policy for an action in the argument state
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
Returns
-------
4-tuple of torch.Tensor of float
    The sampled action, its log probability, the squashed and rescaled
    distribution mean, and the pre-tanh sample
"""
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
x_t = normal.sample((num_samples,))
if num_samples == 1:
x_t = x_t.squeeze(0)
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) +
EPSILON).sum(axis=-1).reshape(log_prob.shape)
if self.num_actions > 1:
log_prob = log_prob.unsqueeze(-1)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
return action, log_prob, mean, x_t
def rsample(self, state, num_samples=1):
"""
Samples the policy for an action in the argument state using
the reparameterization trick
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
Returns
-------
4-tuple of torch.Tensor of float
    The reparameterized sampled action, its log probability, the squashed
    and rescaled distribution mean, and the pre-tanh sample
"""
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
# For re-parameterization trick (mean + std * N(0,1))
# rsample() implements the re-parameterization trick
x_t = normal.rsample((num_samples,))
if num_samples == 1:
x_t = x_t.squeeze(0)
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) +
EPSILON).sum(axis=-1).reshape(log_prob.shape)
if self.num_actions > 1:
log_prob = log_prob.unsqueeze(-1)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
return action, log_prob, mean, x_t
def log_prob(self, state_batch, x_t_batch):
"""
Calculates the log probability of taking the action generated
from x_t, where x_t is returned from sample or rsample. The
log probability is returned for each action dimension separately.
"""
mean, log_std = self.forward(state_batch)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
y_t = torch.tanh(x_t_batch)
log_prob = normal.log_prob(x_t_batch)
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) +
EPSILON).sum(axis=-1).reshape(log_prob.shape)
if self.num_actions > 1:
log_prob = log_prob.unsqueeze(-1)
return log_prob
def to(self, device):
"""
Moves the network to a device
Parameters
----------
device : torch.device
The device to move the network to
Returns
-------
nn.Module
The current network, moved to a new device
"""
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
return super(SquashedGaussian, self).to(device)
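def _example_tanh_log_prob():
    # Illustrative sketch only (not part of the original module): a small
    # numerical check of the change-of-variables correction used in
    # SquashedGaussian.sample, log pi(a) = log N(x) - log(1 - tanh(x)^2 + eps),
    # against torch.distributions' built-in TanhTransform. The 1-D mean, std,
    # and pre-tanh sample below are hypothetical, with unit action scale.
    mean, std = torch.tensor([0.3]), torch.tensor([0.8])
    base = Normal(mean, std)
    reference = torch.distributions.TransformedDistribution(
        base, [torch.distributions.transforms.TanhTransform()])
    x_t = torch.tensor([0.5])
    y_t = torch.tanh(x_t)
    manual = base.log_prob(x_t) - torch.log(1 - y_t.pow(2) + EPSILON)
    assert torch.allclose(manual, reference.log_prob(y_t), atol=1e-4)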
class Softmax(nn.Module):
"""
Softmax implements a softmax policy in each state, parameterized
using an MLP to predict logits.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, activation,
init=None):
super(Softmax, self).__init__()
self.num_actions = num_actions
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, num_actions)
# self.apply(weights_init_)
self.apply(lambda module: weights_init_(module, init, activation))
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state):
x = self.act(self.linear1(state))
x = self.act(self.linear2(x))
return self.linear3(x)
def sample(self, state, num_samples=1):
logits = self.forward(state)
if len(logits.shape) != 1 and (len(logits.shape) != 2 and 1 not in
logits.shape):
shape = logits.shape
raise ValueError(f"expected a vector of logits, got shape {shape}")
probs = F.softmax(logits, dim=1)
policy = torch.distributions.Categorical(probs)
actions = policy.sample((num_samples,))
log_prob = F.log_softmax(logits, dim=1)
log_prob = torch.gather(log_prob, dim=1, index=actions)
if num_samples == 1:
actions = actions.squeeze(0)
log_prob = log_prob.squeeze(0)
actions = actions.unsqueeze(-1)
log_prob = log_prob.unsqueeze(-1)
# return actions.float(), log_prob, None
return actions.int(), log_prob, logits.argmax(dim=-1)
def all_log_prob(self, states):
logits = self.forward(states)
log_probs = F.log_softmax(logits, dim=1)
return log_probs
def log_prob(self, states, actions):
"""
Returns the log probability of taking actions in states.
"""
logits = self.forward(states)
log_probs = F.log_softmax(logits, dim=1)
log_probs = torch.gather(log_probs, dim=1, index=actions.long())
return log_probs
def to(self, device):
"""
Moves the network to a device
Parameters
----------
device : torch.device
The device to move the network to
Returns
-------
nn.Module
The current network, moved to a new device
"""
return super(Softmax, self).to(device)
class Gaussian(nn.Module):
"""
Class Gaussian implements a policy following Gaussian distribution
in each state, parameterized as an MLP. The predicted mean is scaled to be
within `(action_min, action_max)` using a `tanh` activation.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, activation,
action_space, clip_stddev=1000, init=None):
"""
Constructor
Parameters
----------
num_inputs : int
The number of elements in the state feature vector
num_actions : int
The dimensionality of the action vector
hidden_dim : int
The number of units in each hidden layer of the network
action_space : gym.spaces.Space
The action space of the environment
clip_stddev : float, optional
The value at which the standard deviation is clipped in order to
prevent numerical overflow, by default 1000. If <= 0, then
no clipping is done.
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
"""
super(Gaussian, self).__init__()
self.num_actions = num_actions
# Determine standard deviation clipping
self.clip_stddev = clip_stddev > 0
self.clip_std_threshold = np.log(clip_stddev)
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
# Initialize weights
self.apply(lambda module: weights_init_(module, init, activation))
# Action rescaling
self.action_max = torch.FloatTensor(action_space.high)
self.action_min = torch.FloatTensor(action_space.low)
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state):
"""
Performs the forward pass through the network, predicting the mean
and the log standard deviation.
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
Returns
-------
2-tuple of torch.Tensor of float
The mean and log standard deviation of the Gaussian policy in the
argument state
"""
x = self.act(self.linear1(state))
x = self.act(self.linear2(x))
mean = torch.tanh(self.mean_linear(x))
mean = ((mean + 1) / 2) * (self.action_max - self.action_min) + \
self.action_min # ∈ [action_min, action_max]
log_std = self.log_std_linear(x)
# Works better with std dev clipping to ±1000
if self.clip_stddev:
log_std = torch.clamp(log_std, min=-self.clip_std_threshold,
max=self.clip_std_threshold)
return mean, log_std
def rsample(self, state, num_samples=1):
"""
Samples the policy for an action in the argument state
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
Returns
-------
3-tuple of torch.Tensor of float
    The reparameterized sampled action (clamped to the action bounds),
    its log probability, and the distribution mean
"""
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
# For re-parameterization trick (mean + std * N(0,1))
# rsample() implements the re-parameterization trick
action = normal.rsample((num_samples,))
action = torch.clamp(action, self.action_min, self.action_max)
if num_samples == 1:
action = action.squeeze(0)
log_prob = normal.log_prob(action)
if self.num_actions == 1:
log_prob.unsqueeze(-1)
return action, log_prob, mean
def sample(self, state, num_samples=1):
"""
Samples the policy for an action in the argument state
Parameters
----------
state : torch.Tensor of float
The input state to predict the policy in
num_samples : int
The number of actions to sample
Returns
-------
3-tuple of torch.Tensor of float
    The sampled action (clamped to the action bounds), its log
    probability, and the distribution mean
"""
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
# Non-differentiable
action = normal.sample((num_samples,))
action = torch.clamp(action, self.action_min, self.action_max)
if num_samples == 1:
action = action.squeeze(0)
log_prob = normal.log_prob(action)
if self.num_actions == 1:
log_prob.unsqueeze(-1)
# print(action.shape)
return action, log_prob, mean
def log_prob(self, states, actions, show=False):
"""
Returns the log probability of taking actions in states. The
log probability is returned for each action dimension
separately, and should be added together to get the final
log probability
"""
mean, log_std = self.forward(states)
std = log_std.exp()
normal = Normal(mean, std)
if self.num_actions > 1:
normal = Independent(normal, 1)
log_prob = normal.log_prob(actions)
if self.num_actions == 1:
log_prob.unsqueeze(-1)
if show:
print(torch.cat([mean, std], axis=1)[0])
return log_prob
def to(self, device):
"""
Moves the network to a device
Parameters
----------
device : torch.device
The device to move the network to
Returns
-------
nn.Module
The current network, moved to a new device
"""
self.action_max = self.action_max.to(device)
self.action_min = self.action_min.to(device)
return super(Gaussian, self).to(device)
| 16,553 | 31.206226 | 79 | py |
GreedyAC | GreedyAC-master/agent/nonlinear/value_function/MLP.py | #!/usr/bin/env python3
# Import modules
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import agent.nonlinear.nn_utils as nn_utils
# Class definitions
class V(nn.Module):
"""
Class V is an MLP for estimating the state value function `v`.
"""
def __init__(self, num_inputs, hidden_dim, init, activation):
"""
Constructor
Parameters
----------
num_inputs : int
Dimensionality of input feature vector
hidden_dim : int
The number of units in each hidden layer
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
activation : str
The activation function to use; one of 'relu', 'tanh'
"""
super(V, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
self.apply(lambda module: nn_utils.weights_init_(module, init))
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state):
"""
Performs the forward pass through the network, predicting the value of
`state`.
Parameters
----------
state : torch.Tensor of float
The feature vector of the state to compute the value of
Returns
-------
torch.Tensor of float
The value of the state
"""
x = self.act(self.linear1(state))
x = self.act(self.linear2(x))
x = self.linear3(x)
return x
class DiscreteQ(nn.Module):
"""
Class DiscreteQ implements an action value network with number of
predicted action values equal to the number of available actions.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, init,
activation):
"""
Constructor
Parameters
----------
num_inputs : int
Dimensionality of state feature vector
num_actions : int
Dimensionality of the action feature vector
hidden_dim : int
The number of units in each hidden layer
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
activation : str
The activation function to use; one of 'relu', 'tanh'
"""
super(DiscreteQ, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, num_actions)
self.apply(lambda module: nn_utils.weights_init_(module, init))
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state):
"""
Performs the forward pass through each network, predicting the
action-value for `action` in `state`.
Parameters
----------
state : torch.Tensor of float
The state that the action was taken in
Returns
-------
torch.Tensor
The action value predictions
"""
x = self.act(self.linear1(state))
x = self.act(self.linear2(x))
return self.linear3(x)
class Q(nn.Module):
"""
Class Q implements an action-value network using an MLP function
approximator. The action value is computed by concatenating the action to
the state observation as the input to the neural network.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, init,
activation):
"""
Constructor
Parameters
----------
num_inputs : int
Dimensionality of state feature vector
num_actions : int
Dimensionality of the action feature vector
hidden_dim : int
The number of units in each hidden layer
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
activation : str
The activation function to use; one of 'relu', 'tanh'
"""
super(Q, self).__init__()
# Q1 architecture
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
self.apply(lambda module: nn_utils.weights_init_(module, init))
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state, action):
"""
Performs the forward pass through each network, predicting the
action-value for `action` in `state`.
Parameters
----------
state : torch.Tensor of float
The state that the action was taken in
action : torch.Tensor of float
The action taken in the input state to predict the value function
of
Returns
-------
torch.Tensor
The action value prediction
"""
xu = torch.cat([state, action], 1)
x = self.act(self.linear1(xu))
x = self.act(self.linear2(x))
x = self.linear3(x)
return x
class DoubleQ(nn.Module):
"""
Class DoubleQ implements two action-value networks,
computing the action-value function using two separate fully
connected neural net. This is useful for implementing double Q-learning.
The action values are computed by concatenating the action to the state
observation and using this as input to each neural network.
"""
def __init__(self, num_inputs, num_actions, hidden_dim, init,
activation):
"""
Constructor
Parameters
----------
num_inputs : int
Dimensionality of state feature vector
num_actions : int
Dimensionality of the action feature vector
hidden_dim : int
The number of units in each hidden layer
init : str
The initialization scheme to use for the weights, one of
'xavier_uniform', 'xavier_normal', 'uniform', 'normal',
'orthogonal', by default None. If None, leaves the default
PyTorch initialization.
activation : str
The activation function to use; one of 'relu', 'tanh'
"""
super(DoubleQ, self).__init__()
# Q1 architecture
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# Q2 architecture
self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear5 = nn.Linear(hidden_dim, hidden_dim)
self.linear6 = nn.Linear(hidden_dim, 1)
self.apply(lambda module: nn_utils.weights_init_(module, init))
if activation == "relu":
self.act = F.relu
elif activation == "tanh":
self.act = torch.tanh
else:
raise ValueError(f"unknown activation {activation}")
def forward(self, state, action):
"""
Performs the forward pass through each network, predicting two
action-values (from each action-value approximator) for the input
action in the input state.
Parameters
----------
state : torch.Tensor of float
The state that the action was taken in
action : torch.Tensor of float
The action taken in the input state to predict the value function
of
Returns
-------
2-tuple of torch.Tensor of float
A 2-tuple of action values, one predicted by each function
approximator
"""
xu = torch.cat([state, action], 1)
x1 = self.act(self.linear1(xu))
x1 = self.act(self.linear2(x1))
x1 = self.linear3(x1)
x2 = self.act(self.linear4(xu))
x2 = self.act(self.linear5(x2))
x2 = self.linear6(x2)
return x1, x2
| 8,998 | 30.798587 | 78 | py |
Vecchia_GPR_var_select | Vecchia_GPR_var_select-master/code/func/KISS_GP.py | import math
import torch
import gpytorch
import numpy
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
# SKI requires a grid size hyperparameter. This util can help with that
# We're setting Kronecker structure to False because we're using an additive structure decomposition
grid_size = int(gpytorch.utils.grid.choose_grid_size(train_x, kronecker_structure=False))
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.AdditiveStructureKernel(
gpytorch.kernels.ScaleKernel(
gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.MaternKernel(nu=2.5),
grid_size=grid_size, num_dims=1
)
), num_dims=train_x.shape[-1]
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def KISS_GP_wrap(XTrn, yTrn, XTst, yTst):
XTrnTorch = torch.from_numpy(XTrn).to(torch.float32)
yTrnTorch = torch.from_numpy(numpy.array(yTrn)).to(torch.float32)
XTstTorch = torch.from_numpy(XTst).to(torch.float32)
yTstTorch = torch.from_numpy(numpy.array(yTst)).to(torch.float32)
# Train
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(XTrnTorch, yTrnTorch, likelihood)
model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
training_iterations = 100
for i in range(training_iterations):
optimizer.zero_grad()
output = model(XTrnTorch)
loss = -mll(output, yTrnTorch)
loss.backward()
optimizer.step()
# Predict
model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var():
yPred = likelihood(model(XTstTorch)).mean.view([XTstTorch.shape[0]])
rmseScr = torch.sqrt(torch.square(yTstTorch - yPred).mean() / torch.var(yTstTorch))
return dict({"yTstPred": yPred.tolist(), "rmseScr": rmseScr.item()})
## A test here
# n = 1000
# train_x = torch.rand([n, 4])
# train_y = torch.sin(train_x[:, 0] * (4 * math.pi) + torch.randn(n) * 0.2) + \
# torch.sin(train_x[:, 1] * (2 * math.pi) + torch.randn(n) * 0.2) + \
# torch.sin(train_x[:, 2] * (3 * math.pi) + torch.randn(n) * 0.2)
# nTst = 1000
# xTst = torch.rand([nTst, 4])
# yTst = torch.sin(xTst[:, 0] * (4 * math.pi) + torch.randn(n) * 0.2) + \
# torch.sin(xTst[:, 1] * (2 * math.pi) + torch.randn(n) * 0.2) + \
# torch.sin(xTst[:, 2] * (3 * math.pi) + torch.randn(n) * 0.2)
# KISS_GP_wrap(train_x.numpy(), train_y.numpy(), xTst.numpy(), yTst.numpy())
| 2,912 | 40.614286 | 108 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/startup_config.py | #!/usr/bin/env python
"""
startup_config
Startup configuration utilities
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import random
import numpy as np
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def set_random_seed(random_seed, args=None):
""" set_random_seed(random_seed, args=None)
Set the random_seed for numpy, python, and cudnn
input
-----
random_seed: integer random seed
args: argument parser
"""
# initialization
torch.manual_seed(random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
os.environ['PYTHONHASHSEED'] = str(random_seed)
#For torch.backends.cudnn.deterministic
#Note: this default configuration may result in RuntimeError
#see https://pytorch.org/docs/stable/notes/randomness.html
if args is None:
cudnn_deterministic = True
cudnn_benchmark = False
else:
cudnn_deterministic = args.cudnn_deterministic_toggle
cudnn_benchmark = args.cudnn_benchmark_toggle
if not cudnn_deterministic:
print("cudnn_deterministic set to False")
if cudnn_benchmark:
print("cudnn_benchmark set to True")
if torch.cuda.is_available():
torch.cuda.manual_seed_all(random_seed)
torch.backends.cudnn.deterministic = cudnn_deterministic
torch.backends.cudnn.benchmark = cudnn_benchmark
return
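def _example_set_random_seed():
    # Illustrative sketch only: seeding twice with the same (hypothetical)
    # value makes the subsequent torch and numpy draws repeatable.
    set_random_seed(12345)
    first_torch, first_np = torch.rand(3), np.random.rand(3)
    set_random_seed(12345)
    second_torch, second_np = torch.rand(3), np.random.rand(3)
    assert torch.equal(first_torch, second_torch)
    assert np.allclose(first_np, second_np)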
| 1,549 | 25.271186 | 66 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/math_tools/stats.py | #!/usr/bin/env python
"""
stats.py
Tools to calculate statistics
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.conf as nii_dconf
import torch
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def f_var2std(var):
"""
std = f_var2std(var)
Args:
var: np.arrary, variance
Return:
std: np.array, standard deviation
std = sqrt(variance), std[std<floor] = 1.0
"""
negative_idx = var < 0
std = np.sqrt(var)
std[negative_idx] = 1.0
floored_idx = std < nii_dconf.std_floor
std[floored_idx] = 1.0
return std
def f_online_mean_std(data, mean_old, var_old, cnt_old):
"""
mean, var, count = f_online_mean_std(data, mean, var, num_count)
online algorithm to accumulate mean and var
input
-----
data: input data as numpy.array, in shape [length, dimension]
mean: mean to be updated, np.array [dimension]
var: var to be updated, np.array [dimension]
num_count: how many data rows have been calculated before
this calling.
output
------
mean: mean, np.array [dimension]
var: var, np.array [dimension]
count: accumulated data number, = num_count + data.shape[0]
Ref. parallel algorithm
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
try:
# how many time steps (number of rows) in this data
cnt_this = data.shape[0]
# if input data is empty, don't update
if cnt_this == 0:
return mean_old, var_old, cnt_old
if data.ndim == 1:
# single dimension data, 1d array
mean_this = data.mean()
var_this = data.var()
dim = 1
else:
# multiple dimension data, 2d array
mean_this = data.mean(axis=0)
var_this = data.var(axis=0)
dim = data.shape[1]
# difference of accumulated mean and data mean
diff_mean = mean_this - mean_old
# new mean and var
new_mean = np.zeros([dim], dtype=nii_dconf.h_dtype)
new_var = np.zeros([dim], dtype=nii_dconf.h_dtype)
# update count
updated_count = cnt_old + cnt_this
# update mean
new_mean = mean_old + diff_mean * (float(cnt_this) /
(cnt_old + cnt_this))
# update var
if cnt_old == 0:
# if this is the first data
if data.ndim == 1:
# remember that var is array, not scalar
new_var[0] = var_this
else:
new_var = var_this
else:
# not first data
new_var = (var_old * (float(cnt_old) / updated_count)
+ var_this * (float(cnt_this)/ updated_count)
+ (diff_mean * diff_mean
/ (float(cnt_this)/cnt_old
+ float(cnt_old)/cnt_this
+ 2.0)))
# done
return new_mean, new_var, updated_count
except ValueError:
if data.ndim > 1:
if data.shape[1] != mean_old.shape[0] or \
data.shape[1] != var_old.shape[0]:
nii_display.f_print("Dimension incompatible", "error")
nii_display.f_die("Error in online mean var calculation")
else:
if mean_old.shape[0] != 1 or \
var_old.shape[0] != 1:
nii_display.f_print("Dimension incompatible", "error")
nii_display.f_die("Error in online mean var calculation")
def f_online_mean_cov(data, mean_old, cov_old, cnt_old):
"""
mean, cov, count=f_online_mean_cov(data, mean, cov, num_count):
online algorithm to accumulate mean and cov
input
-----
data: input data as numpy.array, in shape [length, dimension]
mean: mean to be updated, np.array [dimension]
cov: cov to be updated, np.array [dimension, dimension]
num_count: how many data rows have been calculated before
this calling.
output
------
mean: mean, np.array [dimension]
cov: cov, np.array [dimension, dimension]
count: accumulated data number, = num_count + data.shape[0]
Note that the returned cov is biased.
Ref. parallel algorithm
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
if data.ndim == 1:
nii_display.f_print("Dimension incompatible", "error")
nii_display.f_die("Error in online mean cov calculation")
try:
# how many time steps (number of rows) in this data
cnt_this = data.shape[0]
dim = data.shape[1]
# if input data is empty, don't update
if cnt_this == 0:
return mean_old, cov_old, cnt_old
# multiple dimension data, 2d array
mean_this = data.mean(axis=0)
# assume the number of columns is the number of variables
if cnt_this == 1:
cov_this = np.zeros([dim, dim], dtype=nii_dconf.h_dtype)
else:
cov_this = np.cov(data.T)
# difference of accumulated mean and data mean
diff_mean = mean_this - mean_old
# new mean and cov
new_mean = np.zeros([dim], dtype=nii_dconf.h_dtype)
new_cov = np.zeros([dim, dim], dtype=nii_dconf.h_dtype)
# update count
updated_count = cnt_old + cnt_this
# update mean
new_mean = mean_old + diff_mean * (float(cnt_this) /
(cnt_old + cnt_this))
# update cov
if cnt_old == 0:
new_cov = cov_this
else:
# not first data
new_cov = (cov_old * (float(cnt_old) / updated_count)
+ cov_this * (float(cnt_this)/ updated_count)
+ (np.outer(diff_mean, diff_mean)
/ (float(cnt_this)/cnt_old
+ float(cnt_old)/cnt_this
+ 2.0)))
# done
return new_mean, new_cov, updated_count
except ValueError:
if data.ndim > 1:
if data.shape[1] != mean_old.shape[0] or \
data.shape[1] != cov_old.shape[0]:
nii_display.f_print("Dimension incompatible", "error")
nii_display.f_die("Error in online mean cov calculation")
else:
if mean_old.shape[0] != 1 or \
cov_old.shape[0] != 1:
nii_display.f_print("Dimension incompatible", "error")
nii_display.f_die("Error in online mean cov calculation")
def cov_tensor(X):
"""cov = cov_tensor(X)
compute covariance of tensor
input
-----
X: tensor, (batch, length, dim)
output
------
cov: tensor, (batch, dim, dim)
Note that this function returns biased cov
"""
D = X.shape[1]
mean = torch.mean(X, dim=1).unsqueeze(1)
X = X - mean
return 1/D * torch.matmul(X.transpose(-1, -2), X)
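def _example_cov_tensor():
    # Illustrative sketch only: cov_tensor matches numpy's biased covariance
    # for each item of a hypothetical batch of sequences.
    x = torch.randn(2, 50, 4)
    cov = cov_tensor(x)
    ref = np.cov(x[0].numpy().T, bias=True)
    assert np.allclose(cov[0].numpy(), ref, atol=1e-5)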
def f_online_mean_cov_tensor(data, mean_old, cov_old, cnt_old):
"""
mean, cov, count = f_online_mean_cov_tensor(data, mean, cov, num_count)
online algorithm to accumulate mean and cov
input
-----
data: tensor, in shape [batch, length, dimension]
mean: mean to be updated, tensor [batch, dimension]
cov: cov to be updated, tensor [batch, dimension, dimension]
num_count: how many data rows have been calculated before
this calling.
output
------
mean: mean, tensor [batch, dimension]
cov: cov, tensor [batch, dimension, dimension]
count: accumulated data number, = num_count + data.shape[0]
Note that the returned cov is biased.
Example:
data = torch.randn([2, 10, 5])
mean_start = torch.zeros([2, 5])
cov_start = torch.zeros([2, 5, 5])
cnt = 0
mean_old = mean_start
cov_old = cov_start
for idx in range(10):
new_mean, new_var, cnt = f_online_mean_cov_tensor(data[:, idx:idx+1, :],
mean_old, cov_old, cnt)
mean_old = new_mean
cov_old = new_var
Ref. parallel algorithm
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
# how many time steps (number of rows) in this data
batch, cnt_this, dim = data.shape
# if input data is empty, don't update
if cnt_this == 0:
return mean_old, cov_old, cnt_old
# Mean and Cov for this data
# multiple dimension data, 2d array
mean_this = data.mean(dim=1)
# assume the number of columns is the number of variables
if cnt_this == 1:
cov_this = torch.zeros(
[dim, dim], dtype=data.dtype, device=data.device)
else:
cov_this = cov_tensor(data)
# Update
# difference of accumulated mean and data mean
diff_mean = mean_this - mean_old
# new mean and cov
new_mean = torch.zeros([dim], dtype=data.dtype, device=data.device)
new_cov = torch.zeros([dim, dim], dtype=data.dtype, device=data.device)
# update count
updated_count = cnt_old + cnt_this
# update mean
new_mean = mean_old + diff_mean * (float(cnt_this) /
(cnt_old + cnt_this))
# update cov
if cnt_old == 0:
new_cov = cov_this
else:
# not first data
new_cov = (cov_old * (float(cnt_old) / updated_count)
+ cov_this * (float(cnt_this)/ updated_count)
+ (torch.bmm(diff_mean.unsqueeze(-1), diff_mean.unsqueeze(1))
/ (float(cnt_this)/cnt_old
+ float(cnt_old)/cnt_this
+ 2.0)))
# done
return new_mean, new_cov, updated_count
if __name__ == "__main__":
pass
| 10,262 | 30.194529 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/other_tools/script_model_para.py | #!/usr/bin/env python
"""
A simple wrapper to show the parameters of a model
Usage:
# go to the model directory, then
$: python script_model_para.py
We assume model.py and config.py are available in the project directory.
"""
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import importlib
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def f_model_show(pt_model):
"""
f_model_show(pt_model)
Args: pt_model, a Pytorch model
Print the information of the model
"""
#f_model_check(pt_model)
print(pt_model)
num = sum(p.numel() for p in pt_model.parameters() if p.requires_grad)
print("Parameter number: {:d}".format(num))
for name, p in pt_model.named_parameters():
if p.requires_grad:
print("Layer: {:s}\tPara. num: {:<10d} ({:02.1f}%)\tShape: {:s}"\
.format(name, p.numel(), p.numel()*100.0/num, str(p.shape)))
return
if __name__ == "__main__":
sys.path.insert(0, os.getcwd())
if len(sys.argv) == 3:
prj_model = importlib.import_module(sys.argv[1])
prj_conf = importlib.import_module(sys.argv[2])
else:
print("By default, load model.py and config.py")
prj_model = importlib.import_module("model")
prj_conf = importlib.import_module("config")
input_dims = sum(prj_conf.input_dims)
output_dims = sum(prj_conf.output_dims)
model = prj_model.Model(input_dims, output_dims, None)
f_model_show(model)
| 1,902 | 29.206349 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/other_tools/debug.py | #!/usr/bin/env python
"""
debug.py
Tools to help debugging
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import datetime
import numpy as np
import torch
from core_scripts.data_io import io_tools as nii_io
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def convert_data_for_debug(data):
""" data_new = convert_data_for_debug(data)
    For debugging, it is convenient to have the data in numpy format
Args
----
data: tensor
Return
------
data_new: numpy array
"""
if hasattr(data, 'detach'):
return data.detach().to('cpu').numpy()
elif hasattr(data, 'cpu'):
return data.to('cpu').numpy()
elif hasattr(data, 'numpy'):
return data.numpy()
else:
return data
def qw(data, path=None):
""" write data tensor into a temporary buffer
Args
----
data: a pytorch tensor or numpy tensor
path: str, path to be write the data
if None, it will be "./debug/temp.bin"
Return
------
None
"""
if path is None:
path = 'debug/temp.bin'
try:
os.mkdir(os.path.dirname(path))
except OSError:
pass
# write to IO
nii_io.f_write_raw_mat(convert_data_for_debug(data), path)
return
def check_para(pt_model):
""" check_para(pt_model)
Quickly check the statistics on the parameters of the model
Args
----
pt_model: a Pytorch model defined based on torch.nn.Module
Return
------
None
"""
mean_buf = [p.mean() for p in pt_model.parameters() if p.requires_grad]
std_buf = [p.std() for p in pt_model.parameters() if p.requires_grad]
print(np.array([convert_data_for_debug(x) for x in mean_buf]))
print(np.array([convert_data_for_debug(x) for x in std_buf]))
return
class data_probe:
""" data_probe is defined to collect intermediate data
produced from the inference or training stage
"""
def __init__(self):
# a list to store all intermediate data
self.data_buf = []
# a list of data name
self.data_names = []
# a single array to store the data
self.data_concated = None
# default data convert method
self.data_convert_method = convert_data_for_debug
# default method to dump method
self.data_dump_method = nii_io.pickle_dump
# dump file name extension
self.dump_file_ext = '.pkl'
return
def add_data(self, input_data, name=None):
""" add_data(input_data)
Add the input data to a data list. Data will be automatically
converted by self.data_convert_method
input
-----
input_data: tensor, or numpy.array
"""
self.data_buf.append(self.data_convert_method(input_data))
if name:
self.data_names.append(name)
return
def _merge_data(self):
""" merge_data()
Merge the data in the list to a big numpy array table.
        Following the convention of this project, we assume data has shape
(batchsize, length, feat_dim)
"""
self.data_concated = np.concatenate(self.data_buf, axis=1)
return
def _dump_file_path(self, file_path, add_time_tag=True):
""" add additional infor to the ump file path
"""
if add_time_tag:
time_tag = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
return file_path + '_' + time_tag + self.dump_file_ext, \
file_path + '_name_' + time_tag + self.dump_file_ext
else:
            return file_path + self.dump_file_ext, \
                   file_path + '_name_' + self.dump_file_ext
def dump(self, output_path='./debug/data_dump', add_time_tag=True):
""" dump(output_path='./debug/data_dump')
input
-----
output_path: str, path to store the dumped data
"""
        # add additional info to the output_path name
output_path_new, output_file_lst = self._dump_file_path(
output_path, add_time_tag)
try:
os.mkdir(os.path.dirname(output_path_new))
os.mkdir(os.path.dirname(output_file_lst))
except OSError:
pass
## merge data if it has not been done
#if self.data_concated is None:
# self.merge_data()
#nii_io.f_write_raw_mat(self.data_concated, output_path_new)
self.data_dump_method(self.data_buf, output_path_new)
print("Data dumped to {:s}".format(output_path_new))
self.data_dump_method(self.data_names, output_file_lst)
print("Data dumped to {:s}".format(output_file_lst))
self.data_concated = None
return
if __name__ == '__main__':
print("Debugging tools")
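    # A minimal usage sketch (not part of the original file): collect two toy
    # tensors with data_probe and dump them. The output path below is only an
    # illustration; dump() adds a time tag and the .pkl extension itself.
    probe = data_probe()
    probe.add_data(torch.randn(1, 5, 3), name='layer1_output')
    probe.add_data(torch.randn(1, 5, 3), name='layer2_output')
    probe.dump('./debug/data_dump_example')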
| 4,915 | 27.581395 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/other_tools/log_parser.py | #!/usr/bin/env python
"""
log_parser
tools to parse log_train and log_err
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import os
import re
import sys
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def f_read_log_err(file_path):
"""
each line looks like
10753,LJ045-0082,0,9216,0, 22/12100, Time: 0.190877s, Loss: 85.994621, ...
"""
def parse_line(line_input):
line_tmps = line_input.split(',')
tmp_loss = []
for tmp in line_tmps:
if tmp.count('Time'):
tmp_time = float(tmp.lstrip(' Time:').rstrip('s'))
elif tmp.count('Loss'):
tmp_loss.append(float(tmp.lstrip(' Loss:')))
return tmp_time, tmp_loss
time_mat = []
error_mat = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip()
if line.count("Loss"):
tmp_time, tmp_loss = parse_line(line)
time_mat.append(tmp_time)
error_mat.append(tmp_loss)
return np.array(error_mat), np.array(time_mat)
# This function is obsolete
def f_read_log_err_old(file_path, train_num, val_num):
"""
log_train, log_val = f_read_log_err(log_err, num_train_utt, num_val_utt)
input:
-----
log_err: path to the log_err file
num_train_utt: how many training utterances
num_val_utt: how many validation utterances
output:
------
log_train: np.array, average error values per epoch on training set
    log_val: np.array, average error values per epoch on validation set
"""
data_str = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if not line.count('skip'):
try:
tmp = int(line[0])
data_str.append(line)
except ValueError:
pass
row = len(data_str)
col = len(np.fromstring(data_str[0], dtype=np.float32, sep=','))
data = np.zeros([row,col])
for idx, line in enumerate(data_str):
data[idx, :] = np.fromstring(line, dtype=np.float32, sep=',')
print(data.shape[0])
total_num = train_num + val_num
epoch_num = int(data.shape[0] / total_num)
data_train = np.zeros([epoch_num, data.shape[1]])
data_val = np.zeros([epoch_num, data.shape[1]])
for x in range(epoch_num):
temp_data = data[x * total_num:(x+1)*total_num, :]
train_part = temp_data[0:train_num,:]
val_part = temp_data[train_num:(train_num+val_num),:]
data_train[x, :] = np.mean(train_part, axis=0)
data_val[x, :] = np.mean(val_part, axis=0)
return data_train, data_val
def pass_number(input_str):
    # sum of all space-separated numbers in the input string
    return np.array([float(x) for x in input_str.split()]).sum()
def f_read_log_train(file_path, sep='/'):
"""
data_train, data_val, time_per_epoch = read_log_train(path_to_log_train)
input:
-----
path_to_log_train: path to the log_train file
output:
------
data_train: error values per epoch on training set
    data_val: error values per epoch on validation set
time_per_epoch: training time per epoch
"""
def parse_line(line_input, sep):
if sep == ' ':
return line_input.split()
else:
return line_input.split(sep)
read_flag = False
data_str = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if read_flag and line.count('|') > 2:
data_str.append(line)
if line.count('Duration'):
read_flag = True
row = len(data_str)
data_train = None
data_val = None
time_per_epoch = np.zeros(row)
for idx, line in enumerate(data_str):
try:
time_per_epoch[idx] = float(line.split('|')[1])
except ValueError:
continue
trn_data = parse_line(line.split('|')[2], sep)
val_data = parse_line(line.split('|')[3], sep)
if data_train is None or data_val is None:
data_train = np.zeros([row, len(trn_data)])
data_val = np.zeros([row, len(val_data)])
for idx2 in np.arange(len(trn_data)):
data_train[idx, idx2] = pass_number(trn_data[idx2])
data_val[idx,idx2] = pass_number(val_data[idx2])
return data_train, data_val, time_per_epoch
def read_log_err_pytorch(file_path, merge_epoch=False):
def set_size(line):
return int(line.split('/')[1].split(',')[0])
def data_line(line):
if line.count("Time:"):
return True
else:
return False
def get_data(line):
return [float(x.split(":")[1]) for x in line.split(',') if x.count("Loss:")]
trn_utt_num = None
val_utt_num = None
trn_total_num = 0
val_total_num = 0
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if not data_line(line):
continue
temp_num = set_size(line)
col_num = len(get_data(line))
if trn_utt_num is None:
trn_utt_num = temp_num
if temp_num != val_utt_num and temp_num != trn_utt_num:
val_utt_num = temp_num
if trn_utt_num == temp_num:
trn_total_num += 1
if val_utt_num == temp_num:
val_total_num += 1
if trn_utt_num is None:
print("Cannot parse file")
return
if val_utt_num is None:
print("Trn %d, no val" % (trn_utt_num))
else:
print("Trn %d, val %d" % (trn_utt_num, val_utt_num))
print("Trn data %d, val data %d" % (trn_total_num, val_total_num))
trn_data = np.zeros([trn_total_num, col_num])
val_data = np.zeros([val_total_num, col_num])
trn_utt_cnt = 0
val_utt_cnt = 0
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if not data_line(line):
continue
data = get_data(line)
temp_num = set_size(line)
if trn_utt_num == temp_num:
trn_data[trn_utt_cnt, :] = np.array(data)
trn_utt_cnt += 1
if val_utt_num == temp_num:
val_data[val_utt_cnt, :] = np.array(data)
val_utt_cnt += 1
if merge_epoch:
trn_data_new = np.zeros([trn_total_num // trn_utt_num, col_num])
val_data_new = np.zeros([val_total_num // val_utt_num, col_num])
for idx in range(min([trn_total_num // trn_utt_num, val_total_num // val_utt_num])):
trn_data_new[idx, :] = trn_data[idx*trn_utt_num:(idx+1)*trn_utt_num, :].mean(axis=0)
val_data_new[idx, :] = val_data[idx*val_utt_num:(idx+1)*val_utt_num, :].mean(axis=0)
return trn_data_new, val_data_new
else:
return trn_data, val_data
if __name__ == "__main__":
print("logParser")
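    # A minimal usage sketch (not part of the original file): parse a log_err
    # file passed on the command line, e.g. `python log_parser.py ./log_err`.
    # Nothing is assumed about the path; it comes from the user.
    if len(sys.argv) > 1:
        err_mat, time_mat = f_read_log_err(sys.argv[1])
        print("parsed loss matrix:", err_mat.shape,
              "time per utterance:", time_mat.shape)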
| 7,064 | 31.113636 | 96 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/wav_tools.py | #!/usr/bin/env python
"""
data_io
Interface to process waveforms.
Note that functions here are based on numpy, and they are intended to be used
before data are converted into torch tensors.
data on disk -> DataSet.__getitem__() -----> Collate ----> Pytorch model
numpy.tensor torch.tensor
These functions don't work on pytorch tensors
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import wave
import scipy.io.wavfile
try:
import soundfile
except ModuleNotFoundError:
pass
import core_scripts.data_io.io_tools as nii_io_tk
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def mulaw_encode(x, quantization_channels, scale_to_int=True):
"""x_mu = mulaw_encode(x, quantization_channels, scale_to_int=True)
mu-law companding
input
-----
x: np.array, float-valued waveforms in (-1, 1)
quantization_channels (int): Number of channels
scale_to_int: Bool
True: scale mu-law to int
False: return mu-law in (-1, 1)
output
------
x_mu: np.array, mulaw companded wave
"""
mu = quantization_channels - 1.0
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
if scale_to_int:
x_mu = np.array((x_mu + 1) / 2 * mu + 0.5, dtype=np.int32)
return x_mu
def mulaw_decode(x_mu, quantization_channels, input_int=True):
"""mulaw_decode(x_mu, quantization_channels, input_int=True)
mu-law decoding
input
-----
x_mu: np.array, mu-law waveform
quantization_channels: int, Number of channels
input_int: Bool
True: convert x_mu (int) from int to float, before mu-law decode
False: directly decode x_mu (float)
output
------
x: np.array, waveform from mulaw decoding
"""
mu = quantization_channels - 1.0
if input_int:
x = x_mu / mu * 2 - 1.0
else:
x = x_mu
x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.0) / mu
return x
def alaw_encode(x, quantization_channels, scale_to_int=True, A=87.6):
"""x_a = alaw_encoder(x, quantization_channels, scale_to_int=True, A=87.6)
input
-----
x: np.array, float-valued waveforms in (-1, 1)
quantization_channels (int): Number of channels
scale_to_int: Bool
True: scale mu-law to int
False: return mu-law in (-1, 1)
A: float, parameter for a-law, default 87.6
output
------
x_a: np.array, a-law companded waveform
"""
num = quantization_channels - 1.0
x_abs = np.abs(x)
flag = (x_abs * A) >= 1
x_a = A * x_abs
x_a[flag] = 1 + np.log(x_a[flag])
x_a = np.sign(x) * x_a / (1 + np.log(A))
if scale_to_int:
x_a = np.array((x_a + 1) / 2 * num + 0.5, dtype=np.int32)
return x_a
def alaw_decode(x_a, quantization_channels, input_int=True, A=87.6):
"""alaw_decode(x_a, quantization_channels, input_int=True)
input
-----
x_a: np.array, mu-law waveform
quantization_channels: int, Number of channels
input_int: Bool
True: convert x_mu (int) from int to float, before mu-law decode
False: directly decode x_mu (float)
A: float, parameter for a-law, default 87.6
output
------
x: np.array, waveform
"""
num = quantization_channels - 1.0
if input_int:
x = x_a / num * 2 - 1.0
else:
x = x_a
sign = np.sign(x)
x_a_abs = np.abs(x)
x = x_a_abs * (1 + np.log(A))
flag = x >= 1
x[flag] = np.exp(x[flag] - 1)
x = sign * x / A
return x
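def _demo_companding():
    """A minimal round-trip sketch for the companding tools above. This demo
    is not part of the original API; the test signal and the 256-level
    setting are arbitrary choices."""
    x = 0.5 * np.sin(2 * np.pi * 5 * np.arange(0, 1, 1.0 / 16000))
    # encode to integer codes, then decode back to float
    x_mu = mulaw_encode(x, 256)
    x_rec_mu = mulaw_decode(x_mu, 256)
    x_a = alaw_encode(x, 256)
    x_rec_a = alaw_decode(x_a, 256)
    print("mu-law max abs error:", np.max(np.abs(x - x_rec_mu)))
    print("A-law max abs error:", np.max(np.abs(x - x_rec_a)))
    return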
def waveReadAsFloat(wavFileIn):
""" sr, wavData = wavReadToFloat(wavFileIn)
Wrapper over scipy.io.wavfile
Return:
sr: sampling_rate
wavData: waveform in np.float32 (-1, 1)
"""
sr, wavdata = scipy.io.wavfile.read(wavFileIn)
if wavdata.dtype is np.dtype(np.int16):
wavdata = np.array(wavdata, dtype=np.float32) / \
np.power(2.0, 16-1)
elif wavdata.dtype is np.dtype(np.int32):
wavdata = np.array(wavdata, dtype=np.float32) / \
np.power(2.0, 32-1)
elif wavdata.dtype is np.dtype(np.float32):
pass
else:
print("Unknown waveform format %s" % (wavFileIn))
sys.exit(1)
return sr, wavdata
def waveFloatToPCMFile(waveData, wavFile, bit=16, sr=16000):
"""waveSaveFromFloat(waveData, wavFile, bit=16, sr=16000)
Save waveData (np.float32) as PCM *.wav
Args:
waveData: waveform data as np.float32
wavFile: output PCM waveform file
bit: PCM bits
sr: sampling rate
"""
# recover to 16bit range [-32768, +32767]
rawData = waveData * np.power(2.0, bit-1)
rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1
rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)
# write as signed 16bit PCM
if bit == 16:
rawData = np.asarray(rawData, dtype=np.int16)
elif bit == 32:
rawData = np.asarray(rawData, dtype=np.int32)
else:
print("Only be able to save wav in int16 and int32 type")
print("Save to int16")
rawData = np.asarray(rawData, dtype=np.int16)
scipy.io.wavfile.write(wavFile, sr, rawData)
return
def flacReadAsFloat(wavFileIn):
""" sr, wavData = flacReadAsFloat(wavFileIn)
Wrapper over soundfile.read
Return:
sr: sampling_rate
wavData: waveform in np.float32 (-1, 1)
"""
if 'soundfile' in sys.modules:
x, sr = soundfile.read(wavFileIn)
else:
print("soundfile is not installed.")
print("Due to practical reason, soundfile is not included in env.yml")
print("To install soundfile with support to flac, try:")
print(" conda install libsndfile=1.0.31 -c conda-forge")
print(" conda install pysoundfile -c conda-forge")
exit(1)
return sr, x
def readWaveLength(wavFileIn):
""" length = readWaveLength(wavFileIn)
Read the length of the waveform
Input:
waveFile, str, path to the input waveform
Return:
length, int, length of waveform
"""
with wave.open(wavFileIn, 'rb') as file_ptr:
wavlength = file_ptr.getnframes()
return wavlength
def buffering(x, n, p=0, opt=None):
"""buffering(x, n, p=0, opt=None)
input
-----
x: np.array, input signal, (length, )
n: int, window length
p: int, overlap, not frame shift
outpupt
-------
output: np.array, framed buffer, (frame_num, frame_length)
Example
-------
    framed = buffering(wav, 320, 80, 'nodelay')
Code from https://stackoverflow.com/questions/38453249/
"""
if opt not in ('nodelay', None):
raise ValueError('{} not implemented'.format(opt))
i = 0
if opt == 'nodelay':
# No zeros at array start
result = x[:n]
i = n
else:
# Start with `p` zeros
result = np.hstack([np.zeros(p), x[:n-p]])
i = n-p
# Make 2D array, cast to list for .append()
result = list(np.expand_dims(result, axis=0))
while i < len(x):
# Create next column, add `p` results from last col if given
col = x[i:i+(n-p)]
if p != 0:
col = np.hstack([result[-1][-p:], col])
# Append zeros if last row and not length `n`
if len(col):
col = np.hstack([col, np.zeros(n - len(col))])
# Combine result with next row
result.append(np.array(col))
i += (n - p)
return np.vstack(result).astype(x.dtype)
def windowing(framed_buffer, window_type='hanning'):
"""windowing(framed_buffer, window_type='hanning')
input
-----
framed_buffer: np.array, (frame_num, frame_length), output of buffering
window_type: str, default 'hanning'
"""
if window_type == 'hanning':
window = np.hanning(framed_buffer.shape[1])
else:
assert False, "Unknown window type in windowing"
return framed_buffer * window.astype(framed_buffer.dtype)
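def _demo_framing():
    """A minimal sketch (not part of the original API): frame a toy signal
    into 320-sample frames with an 80-sample shift, then apply the window.
    Note that the third argument of buffering is the overlap, i.e. fl - fs."""
    wav = np.random.randn(16000).astype(np.float32)
    frames = buffering(wav, 320, 320 - 80, 'nodelay')
    windowed = windowing(frames)
    print("frame matrix shape:", windowed.shape)
    return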
def silence_handler(wav, sr, fl=320, fs=80,
max_thres_below=30,
min_thres=-55,
shortest_len_in_ms=50,
flag_output=0,
flag_norm_amp=True,
flag_only_startend_sil = False,
opt_silence_handler = -1):
"""silence_handler(wav, sr, fl=320, fs=80,
max_thres_below=30,
min_thres=-55,
shortest_len_in_ms=50,
flag_output=0,
flag_norm_amp=True,
flag_only_startend_sil = False,
                    opt_silence_handler = -1)
Based on the Speech activity detector mentioned in Sec5.1 of
Tomi Kinnunen, and Haizhou Li.
An Overview of Text-Independent Speaker Recognition: From Features to
Supervectors. Speech Communication 52 (1).
Elsevier: 12–40. doi:10.1016/j.specom.2009.08.009. 2010.
input
-----
wav: np.array, (wav_length, ), wavform data
sr: int, sampling rate
fl: int, frame length, default 320
    fs: int, frame shift, in number of waveform points, default 80
flag_output: int, flag to select output
0: return wav_no_sil, sil_wav, time_tag
1: return wav_no_sil
2: return sil_wav
    max_thres_below: int, default 30, max_energy - max_thres_below
is the lower threshold for speech frame
min_thres: int, default -55, the lower threshold for speech frame
shortest_len_in_ms: int, ms, default 50 ms,
segment less than this length is treated as speech
flag_norm_amp: bool, whether normalize the waveform amplitude
based on window function (default True)
flag_only_startend_sil (obsolete): bool, whether only consider silence in
        the beginning and end. If False, silence within the utterance
will be marked / removed (default False)
opt_silence_handler: int, option to silence trim handler
0: equivalent to flag_only_startend_sil = False
1: equivalent to flag_only_startend_sil = True
2: remove only silence between words
-1: not use this option, but follow flag_only_startend_sil
output
------
wav_no_sil: np.array, (length_1, ), waveform after removing silence
sil_wav: np.array, (length_2, ), waveform in silence regions
frame_tag: np.array, [0, 0, 0, 1, 1, ..., 1, 0, ], where 0 indicates
silence frame, and 1 indicates a non-silence frame
Note: output depends on flag_output
"""
assert fs < fl, "Frame shift should be smaller than frame length"
# frame the singal
frames = buffering(wav, fl, fl - fs, 'nodelay')
# apply window to each frame
windowed_frames = windowing(frames)
# buffer to save window prototype, this is used to normalize the amplitude
window_proto = windowing(np.ones_like(frames))
# compute the frame energy and assign a sil/nonsil flag
frame_energy = 20*np.log10(np.std(frames, axis=1)+np.finfo(np.float32).eps)
frame_energy_max = np.max(frame_energy)
frame_tag = np.bitwise_and(
(frame_energy > (frame_energy_max - max_thres_below)),
frame_energy > min_thres)
    # use the builtin int (np.int was removed in recent NumPy versions)
    frame_tag = np.asarray(frame_tag, dtype=int)
# post filtering of the sil/nonsil flag sequence
seg_len_thres = shortest_len_in_ms * sr / 1000 / fs
# function to ignore short segments
def ignore_short_seg(frame_tag, seg_len_thres):
frame_tag_new = np.zeros_like(frame_tag) + frame_tag
# boundary of each segment
seg_bound = np.diff(np.concatenate(([0], frame_tag, [0])))
# start of each segment
seg_start = np.argwhere(seg_bound == 1)[:, 0]
# end of each segment
seg_end = np.argwhere(seg_bound == -1)[:, 0]
assert seg_start.shape[0] == seg_end.shape[0], \
"Fail to extract segment boundaries"
# length of segment
seg_len = seg_end - seg_start
seg_short_ids = np.argwhere(seg_len < seg_len_thres)[:, 0]
for idx in seg_short_ids:
start_frame_idx = seg_start[idx]
end_frame_idx = seg_end[idx]
frame_tag_new[start_frame_idx:end_frame_idx] = 0
return frame_tag_new
# remove short sil segments
# 1-frame_tag indicates non-speech frames
frame_process_sil = ignore_short_seg(1-frame_tag, seg_len_thres)
# reverse the sign
frame_process_sil = 1 - frame_process_sil
# remove short nonsil segments
frame_process_all = ignore_short_seg(frame_process_sil, seg_len_thres)
frame_tag = frame_process_all
if opt_silence_handler < 0:
        # if we only consider silence at the front and end
if flag_only_startend_sil:
tmp_nonzero = np.flatnonzero(frame_tag)
# start of the first nonsil segment
#start_nonsil = np.asarray(frame_tag == 1).nonzero()[0]
if np.any(tmp_nonzero):
start_nonsil = np.flatnonzero(frame_tag)[0]
# end of the last nonsil segment
end_nonsil = np.flatnonzero(frame_tag)[-1]
# all segments between are switched to nonsil
frame_tag[start_nonsil:end_nonsil] = 1
else:
# no non-silence data, just let it pass
pass
elif opt_silence_handler == 1:
        # if we only consider silence at the front and end
tmp_nonzero = np.flatnonzero(frame_tag)
# start of the first nonsil segment
#start_nonsil = np.asarray(frame_tag == 1).nonzero()[0]
if np.any(tmp_nonzero):
start_nonsil = np.flatnonzero(frame_tag)[0]
# end of the last nonsil segment
end_nonsil = np.flatnonzero(frame_tag)[-1]
# all segments between are switched to nonsil
frame_tag[start_nonsil:end_nonsil] = 1
else:
# no non-silence data, just let it pass
pass
elif opt_silence_handler == 2:
        # keep silence at the front and end; remove only silence between words
tmp_nonzero = np.flatnonzero(frame_tag)
# start of the first nonsil segment
#start_nonsil = np.asarray(frame_tag == 1).nonzero()[0]
if np.any(tmp_nonzero):
start_nonsil = np.flatnonzero(frame_tag)[0]
# end of the last nonsil segment
end_nonsil = np.flatnonzero(frame_tag)[-1]
# all segments between are switched to nonsil
frame_tag[:start_nonsil] = 1
frame_tag[end_nonsil:] = 1
else:
# no non-silence data, just let it pass
pass
else:
pass
# separate non-speech and speech segments
# do overlap and add
# buffer for speech segments
spe_buf = np.zeros([np.sum(frame_tag) * fs + fl], dtype=wav.dtype)
spe_buf_win = np.zeros([np.sum(frame_tag) * fs + fl], dtype=wav.dtype)
# buffer for non-speech segments
sil_buf = np.zeros([np.sum(1-frame_tag) * fs + fl], dtype=wav.dtype)
sil_buf_win = np.zeros([np.sum(1-frame_tag) * fs + fl], dtype=wav.dtype)
spe_fr_pt = 0
non_fr_pt = 0
for frame_idx, flag_speech in enumerate(frame_tag):
if flag_speech:
spe_buf[spe_fr_pt*fs:spe_fr_pt*fs+fl] += windowed_frames[frame_idx]
spe_buf_win[spe_fr_pt*fs:spe_fr_pt*fs+fl] += window_proto[frame_idx]
spe_fr_pt += 1
else:
sil_buf[non_fr_pt*fs:non_fr_pt*fs+fl] += windowed_frames[frame_idx]
sil_buf_win[non_fr_pt*fs:non_fr_pt*fs+fl] += window_proto[frame_idx]
non_fr_pt += 1
# normalize the amplitude if necessary
if flag_norm_amp:
spe_buf_win[spe_buf_win < 0.0001] = 1.0
sil_buf_win[sil_buf_win < 0.0001] = 1.0
spe_buf /= spe_buf_win
sil_buf /= sil_buf_win
if flag_output == 1:
return spe_buf
elif flag_output == 2:
return sil_buf
else:
return spe_buf, sil_buf, frame_tag
###################
# wrapper functions
###################
def silence_handler_wrapper(wav, sr, fl=320, fs=80,
max_thres_below=30,
min_thres=-55,
shortest_len_in_ms=50,
flag_output=0,
flag_norm_amp=True,
flag_only_startend_sil=False):
"""Wrapper over silence_handler
Many APIs used in this project assume (length, 1) shape.
Thus, this API is a wrapper to accept (length, 1) and output (length, 1)
See more on silence_handler
"""
output = silence_handler(
wav[:, 0], sr, fl, fs, max_thres_below,
min_thres, shortest_len_in_ms,
flag_output, flag_norm_amp, flag_only_startend_sil)
if flag_output == 1:
# from (length) to (length, 1)
return np.expand_dims(output, axis=1)
elif flag_output == 2:
return np.expand_dims(output, axis=1)
else:
return np.expand_dims(output[0], axis=1), \
np.expand_dims(output[1], axis=1), \
output[2]
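def _demo_silence_trim():
    """A minimal sketch (not part of the original API): run the wrapper on a
    toy waveform with silence padded at both ends and report the lengths."""
    sr = 16000
    speech = np.random.randn(sr, 1).astype(np.float32) * 0.3
    sil = np.zeros([sr // 2, 1], dtype=np.float32)
    wav = np.concatenate([sil, speech, sil], axis=0)
    speech_part, sil_part, frame_tag = silence_handler_wrapper(wav, sr)
    print("input {:d}, speech {:d}, silence {:d} samples".format(
        wav.shape[0], speech_part.shape[0], sil_part.shape[0]))
    return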
###################
# Other tools
###################
def wav_get_amplitude(waveform, method='max'):
"""
input
-----
    waveform: np.array, (length, 1)
method: str,
'max': compute np.max(np.abs(waveform))
'mean': compute np.mean(np.abs(waveform))
output
------
amp: np.array (1)
"""
if method == 'max':
return np.max(np.abs(waveform))
else:
return np.mean(np.abs(waveform))
def wav_norm_amplitude(waveform, method='max', floor=1e-12):
"""
input
-----
    waveform: np.array, (length, 1)
method: str,
'max': compute np.max(np.abs(waveform))
'mean': compute np.mean(np.abs(waveform))
output
------
    waveform: np.array, (length, 1), amplitude-normalized waveform
"""
amp = wav_get_amplitude(waveform, method=method)
amp = amp + floor if amp < floor else amp
return waveform / amp
def wav_scale_amplitude_to(waveform, amp, method = 'max'):
"""
input
-----
    waveform: np.array, (length, 1)
    amp: float, target amplitude
    method: str,
        'max': compute np.max(np.abs(waveform))
        'mean': compute np.mean(np.abs(waveform))
output
------
waveform: np.array, (length, 1)
"""
return wav_norm_amplitude(waveform, method=method) * amp
###################
# legacy functions
###################
def wavformRaw2MuLaw(wavdata, bit=16, signed=True, quanLevel = 256.0):
"""
wavConverted = wavformRaw2MuLaw(wavdata, bit=16, signed=True, \
quanLevel = 256.0)
Assume wavData is int type:
step1. convert int wav -> float wav
step2. convert linear scale wav -> mu-law wav
Args:
wavdata: np array of int-16 or int-32 waveform
bit: number of bits to encode waveform
signed: input is signed or not
quanLevel: level of quantization (default 2 ^ 8)
Returned:
wav: integer stored as float numbers
"""
if wavdata.dtype != np.int16 and wavdata.dtype != np.int32:
print("Input waveform data in not int16 or int32")
sys.exit(1)
# convert to float numbers
if signed==True:
wavdata = np.array(wavdata, dtype=np.float32) / \
np.power(2.0, bit-1)
else:
wavdata = np.array(wavdata, dtype=np.float32) / \
np.power(2.0, bit)
tmp_quan_level = quanLevel - 1
# mu-law compansion
wavtrans = np.sign(wavdata) * \
np.log(1.0 + tmp_quan_level * np.abs(wavdata)) / \
np.log(1.0 + tmp_quan_level)
wavtrans = np.round((wavtrans + 1.0) * tmp_quan_level / 2.0)
return wavtrans
def wavformMuLaw2Raw(wavdata, quanLevel = 256.0):
"""
waveformMuLaw2Raw(wavdata, quanLevel = 256.0)
Convert Mu-law waveform back to raw waveform
Args:
wavdata: np array
quanLevel: level of quantization (default: 2 ^ 8)
Return:
raw waveform: np array, float
"""
tmp_quan_level = quanLevel - 1
wavdata = wavdata * 2.0 / tmp_quan_level - 1.0
wavdata = np.sign(wavdata) * (1.0/ tmp_quan_level) * \
(np.power(quanLevel, np.abs(wavdata)) - 1.0)
return wavdata
def float2wav(rawData, wavFile, bit=16, samplingRate = 16000):
"""
float2wav(rawFile, wavFile, bit=16, samplingRate = 16000)
Convert float waveform into waveform in int
    This is identical to waveFloatToPCMFile
To be removed
Args:
        rawdata: float waveform data in a np.array
wavFile: output file path
bit: number of bits to encode waveform in output *.wav
samplingrate:
"""
rawData = rawData * np.power(2.0, bit-1)
rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1
rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)
# write as signed 16bit PCM
if bit == 16:
rawData = np.asarray(rawData, dtype=np.int16)
elif bit == 32:
rawData = np.asarray(rawData, dtype=np.int32)
else:
print("Only be able to save wav in int16 and int32 type")
print("Save to int16")
rawData = np.asarray(rawData, dtype=np.int16)
scipy.io.wavfile.write(wavFile, samplingRate, rawData)
return
#################################
# Other utilities based on Numpy
#################################
def f_overlap_cat(data_list, overlap_length):
"""Wrapper for overlap and concatenate
input:
-----
data_list: list of np.array, [(length1, dim), (length2, dim)]
output
------
data: np.array, (length1 + length2 ... - overlap_length * N, dim)
"""
data_dtype = data_list[0].dtype
if data_list[0].ndim == 1:
dim = 1
else:
dim = data_list[0].shape[1]
total_length = sum([x.shape[0] for x in data_list])
data_gen = np.zeros([total_length, dim], dtype=data_dtype)
prev_end = 0
for idx, data_trunc in enumerate(data_list):
tmp_len = data_trunc.shape[0]
if data_trunc.ndim == 1:
data_tmp = np.expand_dims(data_trunc, 1)
else:
data_tmp = data_trunc
if idx == 0:
data_gen[0:tmp_len] = data_tmp
prev_end = tmp_len
else:
win_len = min([prev_end, overlap_length, tmp_len])
win_cof = np.arange(0, win_len)/win_len
win_cof = np.expand_dims(win_cof, 1)
data_gen[prev_end - win_len:prev_end] *= 1.0 - win_cof
data_tmp[:win_len] *= win_cof
data_gen[prev_end-win_len:prev_end-win_len+tmp_len] += data_tmp
prev_end = prev_end-win_len+tmp_len
return data_gen[0:prev_end]
if __name__ == "__main__":
print("Definition of tools for wav")
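    # A minimal sketch (not part of the original file): overlap-add two toy
    # segments with a 100-sample cross-fade using f_overlap_cat.
    seg1 = np.ones([400, 1], dtype=np.float32)
    seg2 = np.ones([400, 1], dtype=np.float32)
    merged = f_overlap_cat([seg1, seg2], 100)
    # expected length: 400 + 400 - 100 = 700
    print("merged length:", merged.shape[0])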
| 23,187 | 31.340307 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/wav_augmentation.py | #!/usr/bin/env python
"""
Functions for waveform augmentation.
Note that
1. functions here are based on numpy, and they are intended to be used
before data are converted into torch tensors.
data on disk -> DataSet.__getitem__() -----> Collate ----> Pytorch model
numpy.tensor torch.tensor
These functions don't work on pytorch tensors
2. RawBoost functions are based on those by H.Tak and M.Todisco
See code here
https://github.com/TakHemlata/RawBoost-antispoofing
Hemlata Tak, Madhu R Kamble, Jose Patino, Massimiliano Todisco, and
Nicholas W D Evans. RawBoost: A Raw Data Boosting and Augmentation Method
Applied to Automatic Speaker Verification Anti-Spoofing. Proc. ICASSP. 2022
"""
from __future__ import absolute_import
import os
import sys
import copy
import numpy as np
from scipy import signal
from pathlib import Path
try:
from pydub import AudioSegment
except ModuleNotFoundError:
pass
import core_scripts.data_io.wav_tools as nii_wav_tools
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
################
# Tool
################
def unify_length_shape(data, target_data):
""" output = unify_length_shape(data, target_data)
input
-----
data: np.array, either (L), or (L, 1)
target_data: np.array, either (L) or (L, 1)
output
------
output: np.array that has same shape as target_data
"""
output_buf = np.zeros_like(target_data)
min_len = min([data.shape[0], target_data.shape[0]])
if data.ndim == target_data.ndim:
output_buf[:min_len] = data[:min_len]
elif data.ndim == 1:
output_buf[:min_len, 0] = data[:min_len]
elif data.ndim == 2:
output_buf[:min_len] = data[:min_len, 0]
else:
print("Implementation error in unify_length_shape")
sys.exit(1)
return output_buf
################
# Time domain
################
def wav_rand_sil_trim(wav,
sr,
random_trim_sil = False,
random_trim_nosil = False):
""" output = wav_rand_sil_trim(
wav, sr,
random_trim_sil = False,
random_trim_nosil = False)
Randomly trim the leading and ending silence.
input
-----
wav: np.array, (length, 1)
sr: int, waveform sampling rate
random_trim_sil: bool, randomly trim silence (default False)
random_trim_nosil: bool, randomly trim no-silence segments
output
------
output: np.array, (length, 1)
start_idx: int, starting time index in the input wav
end_idx: int, ending time index in the input wav
output <- wav[start_idx:end_idx]
"""
# original length
orig_len = wav.shape[0]
# frame shift for silence detection, fixed here
fs=80
# get the time flag for silence region
_, _, frame_tag = nii_wav_tools.silence_handler_wrapper(
wav, sr, fs=fs, flag_only_startend_sil = True)
# get the ending time of the leading silence
# get the starting time of the trailing silence
#
if len(np.flatnonzero(frame_tag)):
# start and end position
start_nonsil = np.flatnonzero(frame_tag)[0] * fs
end_nonsil = np.flatnonzero(frame_tag)[-1] * fs
else:
# no silence, use the original entire data
start_nonsil = 0
end_nonsil = wav.shape[0]
# if further randomly trim,
if random_trim_sil:
prob = np.random.rand()
start_nosil_new = int(start_nonsil * prob)
end_nosil_new = int((orig_len - end_nonsil)*prob) + end_nonsil
else:
start_nosil_new = start_nonsil
end_nosil_new = end_nonsil
# get the non-silence region
if start_nosil_new < end_nosil_new and start_nosil_new > 0:
input_new = wav[start_nosil_new:end_nosil_new]
else:
input_new = wav
return input_new, start_nosil_new, end_nosil_new
def wav_time_mask(input_data, wav_samp_rate):
""" output = wav_time_mask(input_data, wav_samp_rate)
Apply time mask and zero-out segments
input
-----
input_data: np.array, (length, 1)
wav_samp_rate: int, waveform sampling rate
output
------
output: np.array, (length, 1)
"""
    # randomly choose the width and the start of the zeroed segment
seg_width = int(np.random.rand() * 0.2 * wav_samp_rate)
start_idx = int(np.random.rand() * (input_data.shape[0] - seg_width))
if start_idx < 0:
start_idx = 0
if (start_idx + seg_width) > input_data.shape[0]:
seg_width = input_data.shape[0] - start_idx
tmp = np.ones_like(input_data)
tmp[start_idx:start_idx+seg_width] = 0
return input_data * tmp
def batch_siltrim_for_multiview(input_data_batch, wav_samp_rate,
random_trim_sil=False,
random_trim_nosil=False):
""" output = batch_trim(input_data, wav_samp_rate)
For multi-view data, trim silence
input
-----
input_data: list of np.array, (length, 1)
wav_samp_rate: int, waveform sampling rate
output
------
output: list of np.array, (length, 1)
"""
# output buffer
output_data_batch = []
# original length
orig_len = input_data_batch[0].shape[0]
# get the starting and ending of non-silence region
# (computed based on the first wave in the list)
_, start_time, end_time = wav_rand_sil_trim(
input_data_batch[0], wav_samp_rate,
random_trim_sil, random_trim_nosil)
# do trimming on all waveforms in the input list
if start_time < end_time and start_time > 0:
for data in input_data_batch:
output_data_batch.append(data[start_time:end_time])
else:
for data in input_data_batch:
output_data_batch.append(data)
return output_data_batch
def batch_pad_for_multiview(input_data_batch_, wav_samp_rate, length,
random_trim_nosil=False, repeat_pad=False):
""" output = batch_pad_for_multiview(
input_data_batch, wav_samp_rate, length, random_trim_nosil=False)
If input_data_batch is a single trial, trim it to a fixed length
For multi-view data, trim all the trials to a fixed length, using the same
random start and end
input
-----
input_data: list of np.array, (length, 1)
wav_samp_rate: int, waveform sampling rate
output
------
output: list of np.array, (length, 1)
"""
# unify the length of input data before further processing
def _ad_length(x, length, repeat_pad):
# adjust the length of the input x
if length > x.shape[0]:
if repeat_pad:
rt = int(length / x.shape[0]) + 1
tmp = np.tile(x, (rt, 1))[0:length]
else:
tmp = np.zeros([length, 1])
tmp[0:x.shape[0]] = x
else:
tmp = x[0:length]
return tmp
# use the first data in the list
firstlen = input_data_batch_[0].shape[0]
input_data_batch = [_ad_length(x, firstlen, repeat_pad) \
for x in input_data_batch_]
#
new_len = input_data_batch[0].shape[0]
if repeat_pad is False:
# if we simply trim longer sentence but not pad shorter sentence
if new_len < length:
start_len = 0
end_len = new_len
elif random_trim_nosil:
start_len = int(np.random.rand() * (new_len - length))
end_len = start_len + length
else:
start_len = 0
end_len = length
input_data_batch_ = input_data_batch
else:
if new_len < length:
start_len = 0
end_len = length
rt = int(length / new_len) + 1
# repeat multiple times
input_data_batch_ = [np.tile(x, (rt, 1)) for x in input_data_batch]
elif random_trim_nosil:
start_len = int(np.random.rand() * (new_len - length))
end_len = start_len + length
input_data_batch_ = input_data_batch
else:
start_len = 0
end_len = length
input_data_batch_ = input_data_batch
output_data_batch = [x[start_len:end_len] for x in input_data_batch_]
return output_data_batch
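def _demo_batch_pad():
    """A minimal sketch (not part of the original API): pad two parallel views
    of a 12000-sample waveform to a fixed 16000-sample length by repetition."""
    view1 = np.random.randn(12000, 1).astype(np.float32)
    view2 = np.random.randn(12000, 1).astype(np.float32)
    out1, out2 = batch_pad_for_multiview(
        [view1, view2], 16000, 16000,
        random_trim_nosil=False, repeat_pad=True)
    print("padded shapes:", out1.shape, out2.shape)
    return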
##################
# Frequency domain
##################
def wav_freq_mask_fixed(input_data, wav_samp_rate, start_b, end_b):
""" output = wav_freq_mask_fixed(input_data, wav_samp_rate, start_b, end_b)
Mask the frequency range, fixed
input
-----
input_data: np.array, (length, 1)
wav_samp_rate: int, waveform sampling rate
start_b: float
end_b: float
output
------
output: np.array, (length, 1)
"""
    # order of the filter, fixed to be 10
# change it to a random number later
filter_order = 10
if start_b < 0.01:
sos = signal.butter(filter_order, end_b, 'highpass', output='sos')
elif end_b > 0.99:
sos = signal.butter(filter_order, start_b, 'lowpass',output='sos')
else:
sos = signal.butter(
filter_order, [start_b, end_b], 'bandstop', output='sos')
filtered = signal.sosfilt(sos, input_data[:, 0])
# change dimension
output = np.expand_dims(filtered, axis=1)
return output
def wav_freq_mask(input_data, wav_samp_rate):
""" output = wav_freq_mask(input_data, wav_samp_rate)
Randomly mask the signal in frequency domain
input
-----
input_data: np.array, (length, 1)
wav_samp_rate: int, waveform sampling rate
output
------
output: np.array, (length, 1)
"""
    # order of the filter, fixed to be 10
    # change it to a random number later
    filter_order = 10
    # maximum value of the bandwidth for frequency masking
    max_band_width = 0.2
    # actual bandwidth
    band_w = np.random.rand() * max_band_width
if band_w < 0.05:
# if the bandwidth is too small, do no masking
output = input_data
else:
# start
start_b = np.random.rand() * (1 - band_w)
# end
end_b = start_b + band_w
output = wav_freq_mask_fixed(input_data, wav_samp_rate, start_b, end_b)
return output
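def _demo_freq_mask():
    """A minimal sketch (not part of the original API): mask roughly the
    1-3 kHz band of a toy 16 kHz waveform. The band edges are assumed to be
    normalized to the Nyquist frequency, matching the (0, 1) values drawn
    in wav_freq_mask above."""
    sr = 16000
    wav = np.random.randn(sr, 1).astype(np.float32)
    masked = wav_freq_mask_fixed(wav, sr, 1000.0 / (sr / 2), 3000.0 / (sr / 2))
    print("masked waveform shape:", masked.shape)
    return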
##################
# Compression codec
##################
def wav_codec(input_dat, wav_samp_rate):
""" A wrapper to use pyDub and ffmpeg
This requires pyDub and ffmpeg installed.
"""
tmpdir = '/tmp/xwtemp'
if not os.path.isdir(tmpdir):
os.mkdir(tmpdir)
randomname = "{:s}/{:010d}".format(tmpdir, np.random.randint(100000))
while os.path.isfile(randomname + '.empty'):
randomname = "{:s}/{:010d}".format(tmpdir, np.random.randint(100000))
Path(randomname + '.empty').touch()
# write to file (16bit PCM)
nii_wav_tools.waveFloatToPCMFile(input_dat[:, 0], randomname + '.wav')
data = AudioSegment.from_wav(randomname + '.wav')
# choose the codec
rand_codec = np.random.randint(2)
if rand_codec == 0:
# mp3
rand_bitrate = np.random.randint(6)
if rand_bitrate == 0:
rate = '16k'
elif rand_bitrate == 1:
rate = '32k'
elif rand_bitrate == 2:
rate = '64k'
elif rand_bitrate == 3:
rate = '128k'
elif rand_bitrate == 4:
rate = '256k'
else:
rate = '320k'
data.export(randomname + '.tmp', format='mp3',
codec='libmp3lame', bitrate=rate)
data_codec = AudioSegment.from_mp3(randomname + '.tmp')
output_dat = np.array(data_codec.get_array_of_samples() /
np.power(2, 16-1))
# clean
#os.remove(randomname + '.wav')
try:
os.remove(randomname + '.tmp')
except FileNotFoundError:
pass
#os.remove(randomname + '.empty')
else:
# opus ogg
rand_bitrate = np.random.randint(5)
if rand_bitrate == 0:
rate = '16k'
elif rand_bitrate == 1:
rate = '32k'
elif rand_bitrate == 2:
rate = '64k'
elif rand_bitrate == 3:
rate = '128k'
else:
rate = '256k'
data.export(randomname + '.tmp', format='opus',
bitrate = rate, codec='libopus')
data_codec = AudioSegment.from_file(
randomname + '.tmp', format='ogg', codec='libopus')
data_codec = data_codec.set_frame_rate(16000)
output_dat = np.array(data_codec.get_array_of_samples() /
np.power(2, 16-1))
# clean
#os.remove(randomname + '.wav')
try:
os.remove(randomname + '.tmp')
except FileNotFoundError:
pass
#os.remove(randomname + '.empty')
try:
os.remove(randomname + '.wav')
os.remove(randomname + '.empty')
except FileNotFoundError:
pass
if output_dat.shape[0] != input_dat.shape[0]:
output_dat_ = np.zeros_like(input_dat[:, 0])
minlen = min([output_dat.shape[0], input_dat.shape[0]])
        output_dat_[0:minlen] = output_dat[0:minlen]
output_dat = output_dat_
return np.expand_dims(output_dat, axis=1)
##################
# Waveform morphing
##################
def morph_wavform(wav1, wav2, para=0.5, method=2,
fl = 320, fs = 160, nfft = 1024):
""" output = morph_waveform(wav1, wav2, method=2, para=0.5
fl = 320, fs = 160, nfft = 1024)
input
-----
wav1: np.array, (L,) or (L,1), input waveform 1
wav2: np.array, (L,) or (L,1), input waveform 2
para: float, coefficient for morphing
method: int,
method = 1, waveform level morphing
output = wav1 * para + wav2 * (1-para)
method = 2, spec amplitude morphing
            amplitude = wav1_amp ** para * wav2_amp ** (1 - para)
            phase is from wav1
method = 3, phase morphing
...
amplitude is from wav1
method = 4, both spec and phase
fl: int, frame length for STFT analysis when method > 1
fs: int, frame shift for STFT analysis when method > 1
nfft: int, fft points for STFT analysis when method > 1
output
------
output: np.array, same shape as wav1 and wav2
"""
length = min([wav1.shape[0], wav2.shape[0]])
if wav1.ndim > 1:
data1 = wav1[0:length, 0]
else:
data1 = wav1[0:length]
if wav2.ndim > 1:
data2 = wav2[0:length, 0]
else:
data2 = wav2[0:length]
if method == 1 or method == 'wav':
# waveform level
data = data1 * para + data2 * (1.0 - para)
elif method == 2 or method == 'specamp':
# spectrum amplitude
_, _, Zxx1 = signal.stft(
data1, nperseg=fl, noverlap=fl - fs, nfft=nfft)
_, _, Zxx2 = signal.stft(
data2, nperseg=fl, noverlap=fl - fs, nfft=nfft)
amp1 = np.abs(Zxx1)
amp2 = np.abs(Zxx2)
pha1 = np.angle(Zxx1)
pha2 = np.angle(Zxx2)
# merge amplitude
amp = np.power(amp1, para) * np.power(amp2, (1.0 - para))
#
Zxx = amp * np.cos(pha1) + 1j * amp * np.sin(pha1)
_, data = signal.istft(
Zxx, nperseg = fl, noverlap = fl - fs, nfft = nfft)
elif method == 3 or method == 'phase':
# phase,
_, _, Zxx1 = signal.stft(
data1, nperseg=fl, noverlap=fl - fs, nfft=nfft)
_, _, Zxx2 = signal.stft(
data2, nperseg=fl, noverlap=fl - fs, nfft=nfft)
amp1 = np.abs(Zxx1)
amp2 = np.abs(Zxx2)
pha1 = np.unwrap(np.angle(Zxx1))
pha2 = np.unwrap(np.angle(Zxx2))
#amp = amp1 * para + amp2 * (1.0 - para)
pha = pha1 * para + pha2 * (1.0 - para)
        Zxx = amp1 * np.cos(pha) + 1j * amp1 * np.sin(pha)
_, data = signal.istft(
Zxx, nperseg=fl, noverlap=fl-fs, nfft=nfft)
elif method == 4 or method == 'specamp-phase':
# both
_, _, Zxx1 = signal.stft(
data1, nperseg=fl, noverlap=fl - fs, nfft=nfft)
_, _, Zxx2 = signal.stft(
data2, nperseg=fl, noverlap=fl - fs, nfft=nfft)
amp1 = np.abs(Zxx1)
amp2 = np.abs(Zxx2)
pha1 = np.unwrap(np.angle(Zxx1))
pha2 = np.unwrap(np.angle(Zxx2))
amp = np.power(amp1, para) * np.power(amp2, (1.0 - para))
pha = pha1 * para + pha2 * (1.0 - para)
        Zxx = amp * np.cos(pha) + 1j * amp * np.sin(pha)
_, data = signal.istft(
Zxx, nperseg=fl, noverlap=fl-fs, nfft=nfft)
# adjust length & shape
data = unify_length_shape(data, wav1)
return data
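def _demo_morph():
    """A minimal sketch (not part of the original API): morph two toy
    waveforms at the waveform level and at the spectral-amplitude level."""
    wav1 = np.random.randn(16000).astype(np.float32)
    wav2 = np.random.randn(16000).astype(np.float32)
    out_wav = morph_wavform(wav1, wav2, para=0.5, method='wav')
    out_amp = morph_wavform(wav1, wav2, para=0.5, method='specamp')
    print("morphed shapes:", out_wav.shape, out_amp.shape)
    return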
##################
# reverberation
##################
def wav_reverb(waveform, rir, use_fft=True, keep_alignment=False):
""" output = wav_reverb(waveform, rir, use_fft=True, keep_alignment=False)
input
-----
waveform: np.array, (length, 1), input waveform
rir: np.array, (length, 1), room impulse response
use_fft: bool,
True: use FFT to do convolution (default)
False: use scipy.lfilter to do convolution (not implemented yet)
keep_alignment: bool
True: shift RIR so that max of RIR starts from 1st time step
False: do nothing (default)
output
------
output_wav: np.array, (length, 1)
"""
if use_fft:
# handling different length
signal_length = max([waveform.shape[0], rir.shape[0]])
# buffer
waveform_buf = np.zeros([signal_length])
rir_buf = np.zeros([signal_length])
waveform_buf[:waveform.shape[0]] = waveform[:, 0]
rir_buf[:rir.shape[0]] = rir[:, 0]
# alignment
if keep_alignment:
# get the max value of RIR
max_index = np.argmax(rir, axis=0)[0]
# circular shift the buffer
rir_buf = np.roll(rir_buf, -max_index)
# fft
convolved = np.fft.rfft(waveform_buf) * np.fft.rfft(rir_buf)
# ifft
output_wav = np.fft.irfft(convolved)
# adjust volume
orig_amp = nii_wav_tools.wav_get_amplitude(
waveform, method='max')
output_wav = nii_wav_tools.wav_scale_amplitude_to(
output_wav, orig_amp, method='max')
else:
print("Not implemented")
sys.exit(1)
return np.expand_dims(output_wav, axis=1)
##################
# RawBoost
#
# https://github.com/TakHemlata/RawBoost-antispoofing/blob/main/RawBoost.py
#
# MIT license
# Copyright (c) 2021 Hemlata
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##################
def randRange(x1, x2, integer):
y = np.random.uniform(low=x1, high=x2, size=(1,))
if integer:
y = int(y)
return y
def normWav(x, always):
if always:
x = x/np.amax(abs(x))
elif np.amax(abs(x)) > 1:
x = x/np.amax(abs(x))
return x
def genNotchCoeffs(nBands, minF, maxF, minBW, maxBW,
minCoeff, maxCoeff, minG, maxG, fs):
b = 1
for i in range(0, nBands):
        fc = randRange(minF, maxF, 0)
        bw = randRange(minBW, maxBW, 0)
        c = randRange(minCoeff, maxCoeff, 1)
if c/2 == int(c/2):
c = c + 1
f1 = fc - bw/2
f2 = fc + bw/2
if f1 <= 0:
f1 = 1/1000
if f2 >= fs/2:
f2 = fs/2-1/1000
b = np.convolve(
signal.firwin(c, [float(f1), float(f2)], window='hamming', fs=fs),
b)
    G = randRange(minG, maxG, 0)
_, h = signal.freqz(b, 1, fs=fs)
b = pow(10, G/20)*b/np.amax(abs(h))
return b
def filterFIR(x,b):
N = b.shape[0] + 1
xpad = np.pad(x, (0, N), 'constant')
y = signal.lfilter(b, 1, xpad)
y = y[int(N/2):int(y.shape[0]-N/2)]
return y
def LnL_convolutive_noise(x,
fs=16000, N_f=5, nBands=5,
minF=20, maxF=8000, minBW=100, maxBW=1000,
minCoeff=10, maxCoeff=100, minG=0, maxG=0,
minBiasLinNonLin=5, maxBiasLinNonLin=20):
# Linear and non-linear convolutive noise
y = [0] * x.shape[0]
for i in range(0, N_f):
if i == 1:
            minG = minG - minBiasLinNonLin
            maxG = maxG - maxBiasLinNonLin
b = genNotchCoeffs(nBands, minF, maxF, minBW,
maxBW, minCoeff, maxCoeff,minG,maxG,fs)
y = y + filterFIR(np.power(x, (i+1)), b)
y = y - np.mean(y)
y = normWav(y,0)
return y
def ISD_additive_noise(x, P=10, g_sd=2):
# Impulsive signal dependent noise
beta = randRange(0, P, 0)
y = copy.deepcopy(x)
x_len = x.shape[0]
n = int(x_len*(beta/100))
p = np.random.permutation(x_len)[:n]
f_r= np.multiply(((2*np.random.rand(p.shape[0]))-1),
((2*np.random.rand(p.shape[0]))-1))
r = g_sd * x[p] * f_r
y[p] = x[p] + r
y = normWav(y,0)
return y
def SSI_additive_noise(x, SNRmin, SNRmax, nBands, minF, maxF, minBW, maxBW,
minCoeff, maxCoeff, minG, maxG, fs):
# Stationary signal independent noise
noise = np.random.normal(0, 1, x.shape[0])
b = genNotchCoeffs(nBands, minF, maxF, minBW, maxBW,
minCoeff, maxCoeff, minG, maxG, fs)
noise = filterFIR(noise, b)
noise = normWav(noise,1)
SNR = randRange(SNRmin, SNRmax, 0)
noise = noise / np.linalg.norm(noise,2) * np.linalg.norm(x,2) / 10.0**(0.05 * SNR)
x = x + noise
return x
def RawBoostWrapper12(x, fs=16000):
""" RawBoost strategy 1+2
"""
if x.ndim > 1:
x_ = x[:, 0]
else:
x_ = x
y = LnL_convolutive_noise(x_, fs)
y = ISD_additive_noise(y)
# make the length equal
length = min(x.shape[0], y.shape[0])
y_ = np.zeros_like(x_)
y_[0:length] = y[0:length]
if x.ndim > 1:
y_ = np.expand_dims(y_, axis=1)
return y_
if __name__ == "__main__":
print("Waveform augmentation tools")
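    # A minimal sketch (not part of the original file): apply the RawBoost 1+2
    # augmentation to a toy waveform using the default parameters above.
    wav = np.random.randn(16000, 1).astype(np.float32) * 0.1
    boosted = RawBoostWrapper12(wav)
    print("RawBoost output shape:", boosted.shape)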
| 23,456 | 29.463636 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/customize_dataset.py | #!/usr/bin/env python
"""
customized dataset
NII_MergeDataSetLoader (merge into one minibatch):
    Load datasets 1, 2, and 3, and draw samples from each of them when
    building a minibatch. One epoch over the merged datasets is decided
    by the smallest dataset.
from __future__ import absolute_import
import os
import sys
import numpy as np
import torch
import torch.utils.data
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_collate_fn as nii_collate_fn
import core_scripts.data_io.customize_sampler as nii_sampler_fn
import core_scripts.data_io.conf as nii_dconf
import core_scripts.data_io.seq_info as nii_seqinfo
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###############################################
# Dataset definition to merge multiple datasets
###############################################
class merge_loader():
""" merge_loader
Data loader for customized data with m_concate_set = None
By default, draw equal number of samples from each subset
__iter__():
__next__(): load data and merge into minibatch
Note: pay attention to adjust the data index in the dataset.
See comment of adjust_utt_idx
"""
def __init__(self, datasets):
# list of datasets
self.m_datasets = datasets
# initialized iterators
self.m_loaders = [x.get_loader() for x in self.m_datasets]
# utterance index shift
# if we merge dataset 1, 2, 3, then
# index for dataset 2: index += dataset_1.get_seq_num()
# index for dataset 3: index += dataset_1 + dataset_2.get_seq_num()
self.m_idx_shift = np.cumsum([0] +
[x.get_seq_num() for x in self.m_datasets])
return
def adjust_utt_idx(self, data_tuple, dataset_idx):
""" adjust_utt_idx(data_tutple, dataset_idx)
when merging dataset 1, 2, 3 ...
index for dataset 2: index += dataset_1.get_seq_num()
index for dataset 3: index += dataset_1 + dataset_2.get_seq_num()
We have to call dataset.f_adjust_idx because it is the dataset itself
that knows how to parse the data_tuple
"""
return self.m_datasets[dataset_idx].get_dataset().f_adjust_idx(
data_tuple, self.m_idx_shift[dataset_idx])
def __iter__(self):
"""
create the list of iterators
"""
self.m_loader_iter = [iter(x) for x in self.m_loaders]
return self
def __next__(self):
""" try to load data from m_datasets, and merge them into a
single minibatch
"""
try:
data_list = []
for dataset_idx, dataloader in enumerate(self.m_loader_iter):
data_list.append(
self.adjust_utt_idx(next(dataloader), dataset_idx))
#
return nii_collate_fn.customize_collate_from_batch(data_list)
except StopIteration:
raise StopIteration
class ConcatDataset(torch.utils.data.Dataset):
""" ConcatDataset
Torch.Dataset that concatenate multiple sub-datasets.
Adopted from
https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/2
But here we concatenate data corpora directly. Minibatch may contain data
from each sub corpus
"""
def __init__(self, datasets):
""" ConcatDataset(datasets)
Args
----
datasets: list of torch.utils.data.Dataset or derived classes
it must have __len__, __getitem__, and adjust_utt_idx
"""
# all the sub sets
self.datasets = datasets
self.num_subset = len(datasets)
# initial the len and len_top, len_bot
self.__init_sub()
return
def __init_sub(self):
# len of each sub set
self.len_buffer = [x.__len__() for x in self.datasets]
# for later use, to decide from which subset we draw the sample
self.len_top = np.cumsum(self.len_buffer)
self.len_bot = np.cumsum([0] + self.len_buffer[:-1])
# name <--> idx mapper (idx in the whole dataset)
self.name_idx_map = {}
for idx_u, idx_d, subset in \
zip(self.len_top, self.len_bot, self.datasets):
data_name_list = subset.f_get_seq_name_list()
for idx_data, data_name in enumerate(data_name_list):
                # similar to the logic in __getitem__, we need to add the
# shift idx_d for each subset
self.name_idx_map[data_name] = idx_data + idx_d
# done
return
def __getitem__(self, i):
""" getitem from the corresponding subcorpus
"""
# for example, data1 = [a], data2 = [b, c]
# self.len_buffer = [1, 2]
# self.len_top = [1, 3]
# self.len_bot = [0, 1]
# __getitem__(0) -> data1[0-0] = a
# __getitem__(1) -> data2[1-1] = b
# __getitem__(2) -> data2[2-1] = c
#
# Notice that the index of b is 0 in data2 but 1 in concatenated set
# The index must be adjusted using f_adjust_idx
for idx_u, idx_d, subset in \
zip(self.len_top, self.len_bot, self.datasets):
if i < idx_u:
# get the data from subdataset
orig_data_tuple = subset.__getitem__(i - idx_d)
# adjust the data idx
return subset.f_adjust_idx(orig_data_tuple, idx_d)
else:
# keep going to the next subset
pass
nii_warn.f_die("Merge dataset: fatal error in __getitem__")
return None
def __len__(self):
return sum(self.len_buffer)
def f_get_seq_len_list(self):
""" length = f_get_seq_len_list()
Total length of utterance in the merged dataset
"""
tmp = []
for sub_dataset in self.datasets:
tmp += sub_dataset.f_get_seq_len_list()
return tmp
def f_get_updated_seq_len_for_sampler_list(self):
""" Similar to f_get_seq_len_list
but it returns the updated data sequence length only for
length-based shuffling in sampler
"""
tmp = []
for sub_dataset in self.datasets:
tmp += sub_dataset.f_get_updated_seq_len_for_sampler_list()
return tmp
def f_update_seq_len_for_sampler_list(self, data_info):
"""
"""
for one_info in data_info:
data_idx = nii_seqinfo.parse_idx(one_info)
data_len = nii_seqinfo.parse_length(one_info)
for idx_u, idx_d, subset in \
zip(self.len_top, self.len_bot, self.datasets):
if data_idx < idx_u:
subset.f_update_seq_len_for_sampler_list(data_idx, data_len)
break
else:
pass
return
def f_manage_data(self, lst_data_idx, opt):
""" f_manage_data(self, lst_data_idx, opt)
"""
# manage the data in each subset
for idx_u, idx_d, subset in \
zip(self.len_top, self.len_bot, self.datasets):
# adjust the index for each sub dataset
tmp_data_idx = [x - idx_d for x in lst_data_idx \
if x >= idx_d and x < idx_u]
subset.f_manage_data(tmp_data_idx, opt)
# re-initialize len, len_top, and len_bot
self.__init_sub()
return None
def f_get_seq_idx_from_name(self, data_names):
""" list_of_idx = f_get_seq_idx_from_name(data_names)
"""
return [self.name_idx_map[x] for x in data_names]
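def _demo_concat_index_mapping():
    """A minimal, self-contained sketch (not part of the original API) of the
    index arithmetic used by ConcatDataset.__getitem__: a global index is
    mapped to a (subset, local index) pair via cumulative subset lengths."""
    len_buffer = [1, 2, 3]                        # toy subset sizes
    len_top = np.cumsum(len_buffer)               # [1, 3, 6]
    len_bot = np.cumsum([0] + len_buffer[:-1])    # [0, 1, 3]
    for i in range(sum(len_buffer)):
        for subset_idx, (idx_u, idx_d) in enumerate(zip(len_top, len_bot)):
            if i < idx_u:
                print("global {:d} -> subset {:d}, local {:d}".format(
                    i, subset_idx, i - idx_d))
                break
    return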
class NII_MergeDataSetLoader():
""" DatasetLoader for loading multiple data corpora as a single one
Similar to NIIDataSetLoader, this merges the DataSet and DataLoader
into a single class.
"""
def __init__(self,
dataset_name, \
list_file_list, \
list_input_dirs, input_exts, input_dims, input_reso, \
input_norm, \
list_output_dirs, output_exts, output_dims, output_reso, \
output_norm, \
stats_path, \
data_format = nii_dconf.h_dtype_str, \
params = None, \
truncate_seq = None, \
min_seq_len = None,
save_mean_std = True, \
wav_samp_rate = None, \
flag_lang = 'EN', \
way_to_merge = 'concatenate',
global_arg = None,
dset_config = None,
input_augment_funcs = None,
output_augment_funcs = None,
inoutput_augment_func = None):
""" Signature is similar to default_io.NIIDataSetLoader.
file_list, input_dirs, and output_dirs are different.
One additional optional argument is way_to_merge.
Args
----
data_set_name: a string to name this dataset
this will be used to name the statistics files
such as the mean/std for this dataset
list_file_list: a list of file_name path
list_input_dirs: a list of lists of dirs for input features
        input_exts: a list of input feature name extensions
input_dims: a list of input feature dimensions
input_reso: a list of input feature temporal resolution,
or None
input_norm: a list of bool, whether normalize input feature or not
list_output_dirs: a list of lists of dirs for output features
        output_exts: a list of output feature name extensions
output_dims: a list of output feature dimensions
output_reso: a list of output feature temporal resolution,
or None
output_norm: a list of bool, whether normalize target feature or not
stats_path: path to the directory of statistics(mean/std)
data_format: method to load the data
'<f4' (default): load data as float32m little-endian
'htk': load data as htk format
params: parameter for torch.utils.data.DataLoader
truncate_seq: None or int,
truncate data sequence into smaller truncks
truncate_seq > 0 specifies the trunck length
min_seq_len: None (default) or int, minimum length of an utterance
utterance shorter than min_seq_len will be ignored
save_mean_std: bool, True (default): save mean and std
wav_samp_rate: None (default) or int, if input data has waveform,
please set sampling rate. It is used by _data_writer
flag_lang: str, 'EN' (default), if input data has text, text will
be converted into code indices. flag_lang indicates the
            language for the text processor. It is used by _data_reader
        way_to_merge: string, 'concatenate' (default) or 'merge'
            'concatenate': simply concatenate multiple corpora
            'merge': create each minibatch by merging data from every corpus
global_arg: argument parser returned by arg_parse.f_args_parsed()
default None
dset_config: object, data set configuration, default None
input_augment_funcs: list of functions for input data transformation
default None
output_augment_funcs: list of output data transformation functions
default None
inoutput_augment_func: a single data augmentation function
default None
Methods
-------
        get_loader(): return a torch.utils.data.DataLoader
        get_dataset(): return the list of sub-dataset loaders
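
        Example
        -------
        A hedged usage sketch; the corpus names, directory paths, file lists,
        feature extensions, dimensions, and resolutions below are hypothetical
        placeholders, not values required by this toolkit:

            loader = NII_MergeDataSetLoader(
                'train_set',
                ['./lst/corpus_a.lst', './lst/corpus_b.lst'],
                [['/data/a/mel'], ['/data/b/mel']],
                ['.mfbsp'], [80], [80], [True],
                [['/data/a/wav'], ['/data/b/wav']],
                ['.wav'], [1], [1], [False],
                './stats',
                params={'batch_size': 8, 'shuffle': True, 'num_workers': 2})
            train_loader = loader.get_loader()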
"""
########
# check whether input_dirs and output_dirs are lists
if type(list_input_dirs[0]) is list and \
type(list_output_dirs[0]) is list and \
type(list_file_list) is list and \
len(list_input_dirs) == len(list_output_dirs) and \
len(list_input_dirs) == len(list_file_list):
pass
else:
mes = "NII_MergeDataSetLoader: input_dirs, output_dirs, "
mes += "and file_list should be list of lists. "
mes += "They should have equal length. But we have:"
mes += "{:s}\n{:s}\n{:s}".format(
str(list_input_dirs), str(list_output_dirs),
str(list_file_list))
nii_warn.f_die(mes)
if type(dataset_name) is list:
if len(dataset_name) != len(list_input_dirs):
mes = "dataset_name should have {:d} elements. ".format(
len(list_file_list))
mes += "But we have: {:s}".format(str(dataset_name))
nii_warn.f_die(mes)
elif len(list(set(dataset_name))) != len(list_input_dirs):
mes = "dataset_name has duplicated elements: {:s}".format(
str(dataset_name))
nii_warn.f_die(mes)
else:
tmp_dnames = dataset_name
else:
tmp_dnames = [dataset_name + '_sub_{:d}'.format(idx) \
for idx in np.arange(len(list_input_dirs))]
#######
# create individual datasets
lst_dset = []
cnt = 0
for sub_input_dirs, sub_output_dirs, sub_file_list, tmp_name in \
zip(list_input_dirs, list_output_dirs, list_file_list, tmp_dnames):
inaug = input_augment_funcs[cnt] if input_augment_funcs else None
ouaug = output_augment_funcs[cnt] if output_augment_funcs else None
lst_dset.append(
nii_default_dset.NIIDataSetLoader(
tmp_name, sub_file_list, \
sub_input_dirs, input_exts, input_dims, input_reso, \
input_norm, \
sub_output_dirs, output_exts, output_dims, output_reso, \
output_norm, \
stats_path, data_format, params, truncate_seq, min_seq_len,\
save_mean_std, wav_samp_rate, flag_lang, \
global_arg, dset_config, inaug, ouaug,\
inoutput_augment_func))
cnt += 1
# list of the datasets
self.m_datasets = lst_dset
#######
# merge multiple datasets (i.e., build the DataLoader)
self.way_to_merge = way_to_merge
if way_to_merge == 'concatenate':
# to create DataLoader, we need the pytorch.dataset
self.m_concate_set = ConcatDataset(
[x.get_dataset() for x in self.m_datasets])
# concatenate multiple datasets
if params is None:
tmp_params = nii_dconf.default_loader_conf
else:
tmp_params = params.copy()
# save parameters
self.m_params = tmp_params
# create data loader
self.m_loader = self.build_loader_concate_merge()
else:
self.m_concate_set = None
# sample mini-batches of equal size from each sub dataset
# use specific dataloader
self.m_loader = merge_loader(lst_dset)
self.m_params = lst_dset[0].get_loader_params()
return
def build_loader_concate_merge(self):
"""
build dataloader for a merged dataset
"""
        ####
        # Although each member of self.m_datasets has its own DataLoader,
        # we need to create a separate DataLoader for the concatenated dataset
        ###
tmp_params = self.m_params.copy()
        # create the sampler
if 'sampler' in tmp_params:
tmp_sampler = None
if tmp_params['sampler'] == nii_sampler_fn.g_str_sampler_bsbl:
if 'batch_size' in tmp_params \
and tmp_params['batch_size'] > 1:
# initialize the sampler
tmp_sampler = nii_sampler_fn.SamplerBlockShuffleByLen(
self.m_concate_set.f_get_seq_len_list(),
tmp_params['batch_size'])
# turn off automatic shuffle
tmp_params['shuffle'] = False
else:
nii_warn.f_print("{:s} off as batch-size is 1".format(
nii_sampler_fn.g_str_sampler_bsbl))
tmp_params['sampler'] = tmp_sampler
# collate function
if 'batch_size' in tmp_params and tmp_params['batch_size'] > 1:
# use customize_collate to handle data with unequal length
# we cannot use default collate_fn
collate_fn = nii_collate_fn.customize_collate
else:
collate_fn = None
# use default DataLoader
return torch.utils.data.DataLoader(
self.m_concate_set, collate_fn=collate_fn, **tmp_params)
def get_loader_params(self):
return self.m_params
def get_loader(self):
""" get_loader():
        Return the DataLoader (torch.utils.data.DataLoader)
"""
return self.m_loader
def get_dataset(self):
""" get_dataset():
        Return the list of sub-dataset loaders (one NIIDataSetLoader per corpus)
"""
return self.m_datasets
def get_data_mean_std(self):
"""
"""
# temporary solution: just use the first one
return self.m_datasets[0].get_data_mean_std()
def print_info(self):
"""
"""
nii_warn.f_print_message("Merge datasets by: " + self.way_to_merge)
for dset in self.m_datasets:
dset.print_info()
return
def putitem(self, output_data, save_dir, filename_prefix, data_infor_str):
""" Decompose the output_data from network into
separate files
"""
        # Since all sub-datasets share the same feature configuration,
        # using any one of them is OK
self.m_datasets[0].putitem(output_data, save_dir, filename_prefix,
data_infor_str)
def get_in_dim(self):
""" Return the dimension of input features
"""
        # Since all sub-datasets share the same feature configuration,
        # using any one of them is OK
return self.m_datasets[0].get_in_dim()
def get_out_dim(self):
""" Return the dimension of output features
"""
        # Since all sub-datasets share the same feature configuration,
        # using any one of them is OK
return self.m_datasets[0].get_out_dim()
def get_seq_num(self):
""" Return the number of sequences (after truncation)
"""
return sum([x.get_seq_num() for x in self.m_datasets])
def get_seq_info(self):
""" Return the full information of each data,
including name, index, segmentation information
"""
tmp = []
for dataset in self.m_datasets:
tmp += dataset.get_seq_info()
return tmp
def get_seq_name_list(self):
""" list = get_seq_name_list()
Return a list of data sequence name
"""
tmp = []
for dataset in self.m_datasets:
tmp += dataset.get_seq_name_list()
return tmp
def get_seq_idx_from_name(self, data_names):
""" idx = get_seq_idx_from_name(data_names)
Return a list of data idx corresponding to the data file names
"""
# re-build data loader
if self.way_to_merge == 'concatenate':
return self.m_concate_set.f_get_seq_idx_from_name(data_names)
else:
nii_warn.f_print("Not implemented get_seq_idx_from_name")
nii_warn.f_die("--way-to-merge-datasets concatenate")
return None
def update_seq_len_in_sampler_sub(self, data_info):
"""
"""
# assume data_info logs the new data length that can be used for
# sampler shuffle_by_length
if self.way_to_merge == 'concatenate':
self.m_concate_set.f_update_seq_len_for_sampler_list(data_info)
else:
print("Not implemented")
sys.exit(1)
return
def update_seq_len_in_sampler(self):
""" update_seq_len()
Update sequence length if sequence length has been changed
(for example, during silence trim process)
This is necessary when using shuffle_by_seq_length sampler
and the sequences were trimmed in data augmentation function.
"""
# call each subdataset and update the sequence length
for idx, _ in enumerate(self.m_datasets):
self.m_datasets[idx].update_seq_len_in_sampler()
# update loader of this database
if self.way_to_merge == 'concatenate':
if self.m_params['sampler'] == nii_sampler_fn.g_str_sampler_bsbl \
and hasattr(self.m_loader.sampler, 'update_seq_length'):
self.m_loader.sampler.update_seq_length(
self.m_concate_set.f_get_updated_seq_len_for_sampler_list())
return
def manage_data(self, lst_data_idx, opt):
""" manage_data(data_index_list, opt)
        Manage data in the dataset. Currently, we can either keep or delete
the specified list of data
Args:
lst_data_idx: list of data indices, lst_data_idx[n] is
the index of the sample in the merged dataset
opt: 'keep', keep only data in idx
'delete', delete data in idx
"""
# re-build data loader
if self.way_to_merge == 'concatenate':
self.m_concate_set.f_manage_data(lst_data_idx, opt)
if self.get_seq_num() < 1:
# DataLoader will raise Error when no data is DataSet
self.m_loader = None
else:
self.m_loader = self.build_loader_concate_merge()
else:
nii_warn.f_print("Active learning requires ")
nii_warn.f_die("--way-to-merge-datasets concatenate")
return
def add_dataset(self, new_data_wrapper):
"""add_dataset(new_data_wrapper)
Add an existing merged dataset to this dataset
Args:
new_data_wrapper: must be a NII_MergeDataSetLoader
"""
self.m_datasets = self.m_datasets + new_data_wrapper.m_datasets
if self.way_to_merge == 'concatenate':
self.m_concate_set = ConcatDataset(
[x.get_dataset() for x in self.m_datasets])
self.m_loader = self.build_loader_concate_merge()
else:
nii_warn.f_print("Active learning requires ")
nii_warn.f_die("--way-to-merge-datasets concatenate")
return
if __name__ == "__main__":
print("Definition of customized Pytorch dataset")
| 23,353 | 37.665563 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/default_data_io.py | #!/usr/bin/env python
"""
data_io
Interface to load data
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
from inspect import signature
import torch
import torch.utils.data
import core_scripts.other_tools.list_tools as nii_list_tools
import core_scripts.other_tools.display as nii_warn
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.data_io.io_tools as nii_io_tk
import core_scripts.data_io.wav_tools as nii_wav_tk
import core_scripts.data_io.text_process.text_io as nii_text_tk
import core_scripts.data_io.conf as nii_dconf
import core_scripts.data_io.seq_info as nii_seqinfo
import core_scripts.math_tools.stats as nii_stats
import core_scripts.data_io.customize_collate_fn as nii_collate_fn
import core_scripts.data_io.customize_sampler as nii_sampler_fn
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
###
## functions wrappers to read/write data for this data_io
###
def _data_reader(file_path, dim, flag_lang, g2p_tool):
""" A wrapper to read raw binary data, waveform, or text
"""
file_name, file_ext = os.path.splitext(file_path)
if file_ext == '.wav':
sr, data = nii_wav_tk.waveReadAsFloat(file_path)
if data.ndim > 1 and data.shape[-1] != dim:
nii_warn.f_print("Expect {:d} channel(s)".format(dim), 'error')
nii_warn.f_die("But {:s} has {:d} channel(s)".format(
file_path, data.shape[-1]))
elif file_ext == '.flac':
sr, data = nii_wav_tk.flacReadAsFloat(file_path)
if data.ndim > 1 and data.shape[-1] != dim:
nii_warn.f_print("Expect {:d} channel(s)".format(dim), 'error')
nii_warn.f_die("But {:s} has {:d} channel(s)".format(
file_path, data.shape[-1]))
elif file_ext == '.txt':
data = nii_text_tk.textloader(file_path, flag_lang, g2p_tool)
else:
data = nii_io_tk.f_read_raw_mat(file_path, dim)
return data
def _data_writer(data, file_path, sr = 16000):
""" A wrapper to write raw binary data or waveform
"""
file_name, file_ext = os.path.splitext(file_path)
if file_ext == '.wav':
nii_wav_tk.waveFloatToPCMFile(data, file_path, sr = sr)
elif file_ext == '.txt':
nii_warn.f_die("Cannot write to {:s}".format(file_path))
else:
nii_io_tk.f_write_raw_mat(data, file_path)
return
def _data_len_reader(file_path):
""" A wrapper to read length of data
"""
file_name, file_ext = os.path.splitext(file_path)
if file_ext == '.wav':
#sr, data = nii_wav_tk.waveReadAsFloat(file_path)
#length = data.shape[0]
length = nii_wav_tk.readWaveLength(file_path)
elif file_ext == '.flac':
sr, data = nii_wav_tk.flacReadAsFloat(file_path)
length = data.shape[0]
elif file_ext == '.txt':
        # txt, no need to compute the length
        # note that this is for the TTS task
length = 0
else:
length = nii_io_tk.f_read_raw_mat_length(file_path)
return length
###
# Definition of DataSet
###
class NIIDataSet(torch.utils.data.Dataset):
""" General class for NII speech dataset
For definition of customized Dataset, please refer to
https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
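    Each __getitem__ call returns a tuple of
    (input_data, output_data, seq_info_string, idx); see __getitem__ below.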
"""
def __init__(self,
dataset_name, \
file_list, \
input_dirs, input_exts, input_dims, input_reso, \
input_norm, \
output_dirs, output_exts, output_dims, output_reso, \
output_norm, \
stats_path, \
data_format = nii_dconf.h_dtype_str, \
truncate_seq = None, \
min_seq_len = None, \
save_mean_std = True, \
wav_samp_rate = None, \
flag_lang = 'EN', \
global_arg = None, \
dset_config = None, \
input_augment_funcs = None, \
output_augment_funcs = None,
inoutput_augment_func = None):
"""
args
----
dataset_name: name of this data set
file_list: a list of file name strings (without extension)
or, path to the file that contains the file names
input_dirs: a list of dirs from which input feature is loaded
        input_exts: a list of input feature name extensions
input_dims: a list of input feature dimensions
input_reso: a list of input feature temporal resolutions
input_norm: a list of bool, whether normalize input feature or not
output_dirs: a list of dirs from which output feature is loaded
        output_exts: a list of output feature name extensions
output_dims: a list of output feature dimensions
output_reso: a list of output feature temporal resolutions
output_norm: a list of bool, whether normalize target feature or not
        stats_path: path to the directory that saves mean/std,
utterance length
data_format: method to load the data
                '<f4' (default): load data as float32, little-endian
'htk': load data as htk format
        truncate_seq: None (default) or int, truncate sequence into chunks.
             truncate_seq > 0 specifies the chunk length
min_seq_len: None (default) or int, minimum length of an utterance
utterance shorter than min_seq_len will be ignored
save_mean_std: bool, True (default): save mean and std
wav_samp_rate: None (default) or int, if input data has waveform,
please set sampling rate. It is used by _data_writer
flag_lang: str, 'EN' (default), if input data has text, the text will
be converted into code indices. flag_lang indicates the
            language for the text processor. It is used by _data_reader
global_arg: argument parser returned by arg_parse.f_args_parsed()
default None
dset_config: object, dataset configuration, default None
input_augment_funcs: list of functions for input data transformation
default None
output_augment_funcs: list of output data transformation functions
default None
inoutput_augment_func: a single data augmentation function,
default None
"""
# initialization
self.m_set_name = dataset_name
self.m_file_list = file_list
self.m_input_dirs = input_dirs
self.m_input_exts = input_exts
self.m_input_dims = input_dims
self.m_output_dirs = output_dirs
self.m_output_exts = output_exts
self.m_output_dims = output_dims
if len(self.m_input_dirs) != len(self.m_input_exts) or \
len(self.m_input_dirs) != len(self.m_input_dims):
nii_warn.f_print("Input dirs, exts, dims, unequal length",
'error')
nii_warn.f_print(str(self.m_input_dirs), 'error')
nii_warn.f_print(str(self.m_input_exts), 'error')
nii_warn.f_print(str(self.m_input_dims), 'error')
nii_warn.f_die("Please check input dirs, exts, dims")
if len(self.m_output_dims) != len(self.m_output_exts) or \
(self.m_output_dirs and \
len(self.m_output_dirs) != len(self.m_output_exts)):
nii_warn.f_print("Output dirs, exts, dims, unequal length", \
'error')
nii_warn.f_die("Please check output dirs, exts, dims")
# fill in m_*_reso and m_*_norm
def _tmp_f(list2, default_value, length):
if list2 is None:
return [default_value for x in range(length)]
else:
return list2
self.m_input_reso = _tmp_f(input_reso, 1, len(input_dims))
self.m_input_norm = _tmp_f(input_norm, True, len(input_dims))
self.m_output_reso = _tmp_f(output_reso, 1, len(output_dims))
self.m_output_norm = _tmp_f(output_norm, True, len(output_dims))
if len(self.m_input_reso) != len(self.m_input_dims):
nii_warn.f_die("len(input_reso) != len(input_dims) in config")
if len(self.m_output_reso) != len(self.m_output_dims):
nii_warn.f_die("len(output_reso) != len(input_dims) in config")
if len(self.m_input_norm) != len(self.m_input_dims):
nii_warn.f_die("len(input_norm) != len(input_dims) in config")
if len(self.m_output_norm) != len(self.m_output_dims):
nii_warn.f_die("len(output_norm) != len(output_dims) in config")
if global_arg is not None:
self.m_ignore_length_invalid = global_arg.ignore_length_invalid_data
self.m_ignore_cached_finfo = global_arg.ignore_cached_file_infor
self.m_force_skip_scanning = global_arg.force_skip_datadir_scanning
else:
self.m_ignore_length_invalid = False
self.m_ignore_cached_finfo = False
self.m_force_skip_scanning = False
        # check augmentation functions
if input_augment_funcs:
if len(input_augment_funcs) != len(self.m_input_dims):
nii_warn.f_die("len(input_augment_funcs) != len(input_dims)")
self.m_inaug_funcs = input_augment_funcs
else:
self.m_inaug_funcs = []
if output_augment_funcs:
if len(output_augment_funcs) != len(self.m_output_dims):
nii_warn.f_die("len(output_augment_funcs) != len(output_dims)")
self.m_ouaug_funcs = output_augment_funcs
else:
self.m_ouaug_funcs = []
if inoutput_augment_func:
self.m_inouaug_func = inoutput_augment_func
else:
self.m_inouaug_func = None
# dimensions
self.m_input_all_dim = sum(self.m_input_dims)
self.m_output_all_dim = sum(self.m_output_dims)
self.m_io_dim = self.m_input_all_dim + self.m_output_all_dim
self.m_truncate_seq = truncate_seq
self.m_min_seq_len = min_seq_len
self.m_save_ms = save_mean_std
# in case there is waveform data in input or output features
self.m_wav_sr = wav_samp_rate
# option to process waveform with simple VAD
if global_arg is not None:
self.m_opt_wav_handler = global_arg.opt_wav_silence_handler
else:
self.m_opt_wav_handler = 0
# in case there is text data in input or output features
self.m_flag_lang = flag_lang
self.m_g2p_tool = None
if hasattr(dset_config, 'text_process_options') and \
type(dset_config.text_process_options) is dict:
self.m_flag_lang = dset_config.text_process_options['flag_lang']
if 'g2p_tool' in dset_config.text_process_options:
self.m_g2p_tool = dset_config.text_process_options['g2p_tool']
# sanity check on resolution configuration
# currently, only input features can have different reso,
# and the m_input_reso must be the same for all input features
if any([x != self.m_input_reso[0] for x in self.m_input_reso]):
nii_warn.f_print("input_reso {:s}".format(str(self.m_input_reso)),
'error')
nii_warn.f_print("NIIDataSet not support", 'error', end='')
nii_warn.f_die(" different input_reso")
if any([x != self.m_output_reso[0] for x in self.m_output_reso]):
nii_warn.f_print("output_reso {:s}".format(str(self.m_output_reso)),
'error')
nii_warn.f_print("NIIDataSet not support", 'error', end='')
nii_warn.f_die(" different output_reso")
if np.any(np.array(self.m_output_reso) < 0):
nii_warn.f_print("NIIDataSet not support negative reso",
'error', end='')
nii_warn.f_die(" Output reso: {:s}".format(str(self.m_output_reso)))
if np.any(np.array(self.m_input_reso) < 0):
nii_warn.f_print("input_reso: {:s}".format(str(self.m_input_reso)))
nii_warn.f_print("Data IO for unaligned input and output pairs")
if truncate_seq is not None:
nii_warn.f_print("truncate is set to None", 'warning')
self.m_truncate_seq = None
self.m_min_seq_len = None
# no need to contrain output_reso = 1
#if any([x != 1 for x in self.m_output_reso]):
# nii_warn.f_print("NIIDataSet only supports", 'error', end='')
# nii_warn.f_die(" output_reso = [1, 1, ... 1]")
#self.m_single_reso = self.m_input_reso[0]
self.m_single_reso = np.max(self.m_input_reso + self.m_output_reso)
# To make sure that target waveform length is exactly equal
# to the up-sampled sequence length
# self.m_truncate_seq must be changed to be N * up_sample
if self.m_truncate_seq is not None:
# assume input resolution is the same
self.m_truncate_seq = self.f_adjust_len(self.m_truncate_seq)
# similarly on self.m_min_seq_len
if self.m_min_seq_len is not None:
# assume input resolution is the same
self.m_min_seq_len = self.f_adjust_len(self.m_min_seq_len)
# method to load/write raw data
if data_format == nii_dconf.h_dtype_str:
self.f_load_data = lambda x, y: _data_reader(
x, y, self.m_flag_lang, self.m_g2p_tool)
self.f_length_data = _data_len_reader
self.f_write_data = lambda x, y: _data_writer(x, y, self.m_wav_sr)
else:
nii_warn.f_print("Unsupported dtype {:s}".format(data_format))
nii_warn.f_die("Only supports {:s} ".format(nii_dconf.h_dtype_str))
# whether input file name in list contains part of the path
# this will be confirmed after reading the file list in the next step
self.flag_filename_with_path = False
        # log down statistics
        # 1. length of each data utterance
        # 2. mean / std of each feature
def get_name(stats_path, set_name, file_name):
tmp = set_name + '_' + file_name
return os.path.join(stats_path, tmp)
if global_arg is not None and global_arg.path_cache_file:
nii_warn.f_print("Cached files are re-directed to {:s}".format(
global_arg.path_cache_file))
tmp_stats_path = global_arg.path_cache_file
else:
tmp_stats_path = stats_path
self.m_ms_input_path = get_name(tmp_stats_path, self.m_set_name, \
nii_dconf.mean_std_i_file)
self.m_ms_output_path = get_name(tmp_stats_path, self.m_set_name, \
nii_dconf.mean_std_o_file)
self.m_data_len_path = get_name(tmp_stats_path, self.m_set_name, \
nii_dconf.data_len_file)
# load and check the validity of data list
self.f_check_file_list(self.m_data_len_path)
# initialize data length and mean /std, read prepared data stats
flag_cal_len = self.f_init_data_len_stats(self.m_data_len_path)
flag_cal_mean_std = self.f_init_mean_std(self.m_ms_input_path,
self.m_ms_output_path)
# if data information is not available, read it again from data
if flag_cal_len or flag_cal_mean_std:
self.f_calculate_stats(flag_cal_len, flag_cal_mean_std)
# if some additional flags are turned on
if hasattr(global_arg, "flag_reverse_data_loading_order") and \
global_arg.flag_reverse_data_loading_order:
self.m_flag_reverse_load_order = True
else:
self.m_flag_reverse_load_order = False
#
if hasattr(global_arg, "force_update_seq_length") and \
global_arg.force_update_seq_length:
self.m_force_update_seq_length = True
else:
self.m_force_update_seq_length = False
# check
if self.__len__() < 1:
nii_warn.f_print("Fail to load any data", "error")
nii_warn.f_print("Possible reasons: ", "error")
mes = "1. Old cache {:s}. Do rm it.".format(self.m_data_len_path)
mes += "\n2. input_dirs, input_exts, "
mes += "output_dirs, or output_exts incorrect."
mes += "\n3. all data are less than minimum_len in length. "
mes += "\nThe last case may happen if truncate_seq == mininum_len "
mes += "and truncate_seq % input_reso != 0. Then, the actual "
mes += "truncate_seq becomes truncate_seq//input_reso*input_reso "
mes += "and it will be shorter than minimum_len. Please change "
mes += "truncate_seq and minimum_len so that "
mes += "truncate_seq % input_reso == 0."
nii_warn.f_print(mes, "error")
nii_warn.f_die("Please check configuration file")
# done
return
def __len__(self):
""" __len__():
Return the number of samples in the list
"""
return len(self.m_seq_info)
def __getitem__(self, idx_input):
""" __getitem__(self, idx):
Return input, output
For test set data, output can be None
"""
        # option to load the samples in reversed order
if self.m_flag_reverse_load_order:
idx = len(self.m_seq_info) - idx_input - 1
else:
idx = idx_input
# get the sample information
try:
tmp_seq_info = self.m_seq_info[idx]
except IndexError:
nii_warn.f_die("Sample {:d} is not in seq_info".format(idx))
# file_name
file_name = tmp_seq_info.seq_tag()
# For input data
input_reso = self.m_input_reso[0]
seq_len = int(tmp_seq_info.seq_length() // input_reso)
s_idx = int(tmp_seq_info.seq_start_pos() // input_reso)
e_idx = s_idx + seq_len
        # in case the input length cannot be derived from
        # tmp_seq_info.seq_length (e.g., unaligned input with reso < 0)
if seq_len < 0:
seq_len = 0
s_idx = 0
e_idx = 0
input_dim = self.m_input_all_dim
in_data = np.zeros([seq_len, input_dim], dtype=nii_dconf.h_dtype)
s_dim = 0
e_dim = 0
# loop over each feature type
for t_dir, t_ext, t_dim, t_res in \
zip(self.m_input_dirs, self.m_input_exts, \
self.m_input_dims, self.m_input_reso):
e_dim = s_dim + t_dim
# get file path and load data
file_path = nii_str_tk.f_realpath(t_dir, file_name, t_ext)
try:
tmp_d = self.f_load_data(file_path, t_dim)
except IOError:
nii_warn.f_die("Cannot find {:s}".format(file_path))
# write data
if t_res < 0:
# if this is for input data not aligned with output
# make sure that the input is in shape (seq_len, dim)
# f_load_data should return data in shape (seq_len, dim)
if tmp_d.ndim == 1:
in_data = np.expand_dims(tmp_d, axis=1)
elif tmp_d.ndim == 2:
in_data = tmp_d
else:
nii_warn.f_die("IO not support {:s}".format(file_path))
elif tmp_d.shape[0] == 1:
# input data has only one frame, duplicate
if tmp_d.ndim > 1:
in_data[:,s_dim:e_dim] = tmp_d[0,:]
elif t_dim == 1:
in_data[:,s_dim] = tmp_d
else:
nii_warn.f_die("Dimension wrong {:s}".format(file_path))
else:
# check
try:
# normal case
if tmp_d.ndim > 1:
# write multi-dimension data
in_data[:,s_dim:e_dim] = tmp_d[s_idx:e_idx,:]
elif t_dim == 1:
# write one-dimension data
in_data[:,s_dim] = tmp_d[s_idx:e_idx]
else:
nii_warn.f_die("Dimension wrong {:s}".format(file_path))
except ValueError:
if in_data.shape[0] != tmp_d[s_idx:e_idx].shape[0]:
mes = 'Expected length is {:d}.\n'.format(e_idx-s_idx)
mes += "Loaded length "+str(tmp_d[s_idx:e_idx].shape[0])
mes += '\nThis may be due to an incompatible cache *.dic.'
mes += '\nPlease check the length in *.dic'
mes += '\nPlease delete it if the cached length is wrong.'
nii_warn.f_print(mes)
nii_warn.f_die("fail to load {:s}".format(file_name))
else:
nii_warn.f_print("unknown data io error")
nii_warn.f_die("fail to load {:s}".format(file_name))
s_dim = e_dim
# load output data
if self.m_output_dirs:
output_reso = self.m_output_reso[0]
seq_len = int(tmp_seq_info.seq_length() // output_reso)
s_idx = int(tmp_seq_info.seq_start_pos() // output_reso)
e_idx = s_idx + seq_len
out_dim = self.m_output_all_dim
out_data = np.zeros([seq_len, out_dim], \
dtype = nii_dconf.h_dtype)
s_dim = 0
e_dim = 0
for t_dir, t_ext, t_dim in zip(self.m_output_dirs, \
self.m_output_exts, \
self.m_output_dims):
e_dim = s_dim + t_dim
# get file path and load data
file_path = nii_str_tk.f_realpath(t_dir, file_name, t_ext)
try:
tmp_d = self.f_load_data(file_path, t_dim)
except IOError:
nii_warn.f_die("Cannot find {:s}".format(file_path))
if tmp_d.shape[0] == 1:
if tmp_d.ndim > 1:
out_data[:,s_dim:e_dim] = tmp_d[0,:]
elif t_dim == 1:
out_data[:,s_dim]=tmp_d
else:
nii_warn.f_die("Dimension wrong {:s}".format(file_path))
else:
try:
if tmp_d.ndim > 1:
out_data[:,s_dim:e_dim] = tmp_d[s_idx:e_idx,:]
elif t_dim == 1:
out_data[:,s_dim]=tmp_d[s_idx:e_idx]
else:
nii_warn.f_die("Dim wrong {:s}".format(file_path))
except ValueError:
if out_data.shape[0] != tmp_d[s_idx:e_idx].shape[0]:
mes = 'Expected length is ' + str(e_idx-s_idx)
mes += ". Loaded "+str(tmp_d[s_idx:e_idx].shape[0])
                            mes += '\nThis may be due to an old cache *.dic.'
mes += '\nPlease check the length in *.dic\n'
mes += 'Please delete it if cached length is wrong.'
nii_warn.f_print(mes)
nii_warn.f_die("fail to load " +file_name)
else:
nii_warn.f_print("unknown data io error")
nii_warn.f_die("fail to load " +file_name)
s_dim = s_dim + t_dim
else:
out_data = []
# post processing if necessary
in_data, out_data, tmp_seq_info, idx = self.f_post_data_process(
in_data, out_data, tmp_seq_info, idx)
# return data
return in_data, out_data, tmp_seq_info.print_to_str(), idx
def f_post_data_process(self, in_data, out_data, seq_info, idx):
"""A wrapper to process the data after loading from files
"""
if self.m_opt_wav_handler == 0 \
and not self.m_inaug_funcs and not self.m_ouaug_funcs \
and not self.m_inouaug_func:
# no any post-processing process
return in_data, out_data, seq_info, idx
else:
# Do post processing one by one
# The order is:
            #  waveform silence handler -> input augmentation functions ->
            #  output augmentation functions -> input&output augmentation
            #
            # Everything can be handled in the input&output augmentation.
            # But to be compatible with old code, we keep them all.
            # It is recommended to use the unified input&output augmentation
###
# buffer infor
###
# create a new sequence information buffer for the input and output
tmp_seq_info = nii_seqinfo.SeqInfo(
seq_info.length, seq_info.seq_name, seq_info.seg_idx,
seq_info.start_pos, seq_info.info_id)
###
# waveform silence handler
###
# waveform handler, this is kept for compatibility
if self.m_opt_wav_handler > 0:
if len(self.m_input_exts) == 1 \
and self.m_input_exts[0][-3:] == 'wav':
if self.m_opt_wav_handler == 1:
tmp_flag_output = self.m_opt_wav_handler
tmp_only_twoends = False
elif self.m_opt_wav_handler == 2:
tmp_flag_output = self.m_opt_wav_handler
tmp_only_twoends = False
elif self.m_opt_wav_handler == 3:
tmp_flag_output = 1
tmp_only_twoends = True
else:
print("Unknown option for wav handler {:d}".format(
self.m_opt_wav_handler))
sys.exit(1)
in_data_n = nii_wav_tk.silence_handler_wrapper(
in_data, self.m_wav_sr,
flag_output = tmp_flag_output,
flag_only_startend_sil = tmp_only_twoends)
# this is temporary setting, if in_data.shape[0]
# corresponds to waveform length, update it
if tmp_seq_info.length == in_data.shape[0]:
tmp_seq_info.length = in_data_n.shape[0]
if self.m_force_update_seq_length:
seq_info.update_len_for_sampler(in_data_n.shape[0])
else:
in_data_n = in_data
if len(self.m_output_exts) == 1 \
and self.m_output_exts[0][-3:] == 'wav':
out_data_n = nii_wav_tk.silence_handler_wrapper(
out_data, self.m_wav_sr,
flag_output = self.m_opt_wav_handler,
flag_only_startend_sil = (self.m_opt_wav_handler==3))
# this is temporary setting, use length if it is compatible
if tmp_seq_info.length == out_data.shape[0]:
tmp_seq_info.length = out_data_n.shape[0]
if self.m_force_update_seq_length:
seq_info.update_len_for_sampler(out_data_n.shape[0])
else:
out_data_n = out_data
else:
in_data_n = in_data
out_data_n = out_data
###
# augmentation functions for input data
###
if self.m_inaug_funcs:
if len(self.m_input_exts) == 1:
# only a single input feature,
sig = signature(self.m_inaug_funcs[0])
if len(sig.parameters) == 1:
in_data_n = self.m_inaug_funcs[0](in_data_n)
elif len(sig.parameters) == 2:
in_data_n = self.m_inaug_funcs[0](in_data_n, seq_info)
else:
in_data_n = self.m_inaug_funcs[0](in_data_n)
# more rules should be applied to handle the data length
# here, simply set length
if type(in_data_n) == np.ndarray:
if tmp_seq_info.length > in_data_n.shape[0]:
tmp_seq_info.length = in_data_n.shape[0]
elif type(in_data_n) == dict:
if 'length' in in_data_n:
tmp_seq_info.length = in_data_n['length']
if 'data' in in_data_n:
in_data_n = in_data_n['data']
else:
print("Input data aug method does not return data")
sys.exit(1)
if self.m_force_update_seq_length:
# Update the data length so that correct data length
# can be used for --sampler block_shuffle_by_length
#
#tmp_len = seq_info.length
seq_info.update_len_for_sampler(tmp_seq_info.length)
#print("{:s} {:s} {:d} -> {:d}".format(
# seq_info.seq_name, seq_info.print_to_str(),
# tmp_len, seq_info.valid_len),
# flush=True)
else:
# multiple input features,
# must check whether func changes the feature length
                # only functions that keep the length will be applied
s_dim = 0
for func, dim in zip(self.m_inaug_funcs, self.m_input_dims):
e_dim = s_dim + dim
tmp_data = func(in_data_n[:, s_dim:e_dim])
if tmp_data.shape[0] == in_data_n.shape[0]:
in_data_n[:, s_dim:e_dim] = tmp_data
s_dim = s_dim + dim
###
# augmentation functions for output data
###
if self.m_ouaug_funcs:
if len(self.m_output_exts) == 1:
# only a single output feature type
sig = signature(self.m_ouaug_funcs[0])
if len(sig.parameters) == 1:
out_data_n = self.m_ouaug_funcs[0](out_data_n)
elif len(sig.parameters) == 2:
out_data_n = self.m_ouaug_funcs[0](out_data_n, seq_info)
else:
out_data_n = self.m_ouaug_funcs[0](out_data_n)
# more rules should be applied to handle the data length
# here, simply set length
#if tmp_seq_info.length > out_data_n.shape[0]:
# tmp_seq_info.length = out_data_n.shape[0]
else:
# multiple output features,
# must check whether func changes the feature length
                # only functions that keep the length will be applied
s_dim = 0
for func, dim in zip(self.m_ouaug_funcs,self.m_output_dims):
e_dim = s_dim + dim
tmp_data = func(out_data_n[:,s_dim:e_dim])
if tmp_data.shape[0] == out_data_n.shape[0]:
out_data_n[:, s_dim:e_dim] = tmp_data
s_dim = s_dim + dim
###
# a unified augmentation function for input and output
###
if self.m_inouaug_func:
# update input output features
in_data_n, out_data_n, tmp_len = self.m_inouaug_func(
in_data_n, out_data_n)
# update sequence length
tmp_seq_info.length = tmp_len
if self.m_force_update_seq_length:
seq_info.update_len_for_sampler(tmp_seq_info.length)
return in_data_n, out_data_n, tmp_seq_info, idx
def f_get_num_seq(self):
""" __len__():
Return the number of samples in the list
"""
return len(self.m_seq_info)
def f_get_seq_len_list(self):
""" Return length of each sequence as list
"""
return [x.seq_length() for x in self.m_seq_info]
def f_get_updated_seq_len_for_sampler_list(self):
""" Similar to f_get_seq_len_list
but it returns the updated data sequence length only for
length-based shuffling in sampler
"""
return [x.seq_len_for_sampler() for x in self.m_seq_info]
def f_update_seq_len_for_sampler_list(self, data_idx, data_len):
try:
self.m_seq_info[data_idx].update_len_for_sampler(data_len)
except IndexError:
nii_warn.f_die("Fail to index data {:d}".format(data_idx))
return
def f_get_mean_std_tuple(self):
return (self.m_input_mean, self.m_input_std,
self.m_output_mean, self.m_output_std)
def f_filename_has_folderpath(self):
""" Return True if file name in self.m_file_list contains '/',
        which indicates that the file name is path/filename
"""
return any([x.count(os.path.sep)>0 for x in self.m_file_list])
def f_check_file_list(self, data_len_buf_path):
""" f_check_file_list(data_len_buf_path):
Check the file list after initialization
Make sure that the file in file_list appears in every
input/output feature directory.
        If not, get a file_list in which every file is available
in every input/output directory
input
-----
data_len_buf_path: str, path to the data length buffer
"""
if self.m_file_list is None:
            # get an initial file list if self.m_file_list is None
#
# if file list is not provided, we only search the directory
# without recursing sub directories
self.m_file_list = nii_list_tools.listdir_with_ext(
self.m_input_dirs[0], self.m_input_exts[0])
elif not isinstance(self.m_file_list, list):
# if m_file_list is a string
# load file list
if isinstance(self.m_file_list, str) and \
os.path.isfile(self.m_file_list):
# read the list if m_file_list is a str
self.m_file_list = nii_list_tools.read_list_from_text(
self.m_file_list)
else:
nii_warn.f_print("Cannot read {:s}".format(self.m_file_list))
nii_warn.f_print("Read file list from directories")
self.m_file_list = nii_list_tools.listdir_with_ext(
self.m_input_dirs[0], self.m_input_exts[0])
else:
# self.m_file_list is a list
pass
if type(self.m_file_list) is list and len(self.m_file_list) < 1:
mes = "either input data list is wrong"
mes += ", or {:s} is empty".format(self.m_input_dirs[0])
mes += "\nPlease check the folder and data list"
nii_warn.f_die(mes)
# decide whether the file name in self.m_file_list contains
# sub folders
flag_recur = self.f_filename_has_folderpath()
self.flag_filename_with_path = flag_recur
# if the stats cache will be loaded, let's skip the checking process
if os.path.isfile(data_len_buf_path) and not self.m_ignore_cached_finfo:
nii_warn.f_print("Skip scanning directories")
return
# check the list of files exist in all input/output directories
if not self.m_force_skip_scanning:
for tmp_d, tmp_e in zip(self.m_input_dirs, self.m_input_exts):
# read a file list from the input directory
tmp_list = nii_list_tools.listdir_with_ext(
tmp_d, tmp_e, flag_recur)
# get the common set of the existing files and those in list
tmp_new_list = nii_list_tools.common_members(
tmp_list, self.m_file_list)
if len(tmp_new_list) < 1:
nii_warn.f_print("Possible error when scanning:", 'error')
nii_warn.f_print(" {:s} for {:s}".format(tmp_d, tmp_e), 'error')
nii_warn.f_print('Some file names to be scanned:', 'error')
nii_warn.f_print(' ' + ' '.join(self.m_file_list[0:10]),'error')
if self.m_file_list[0].endswith(tmp_e):
nii_warn.f_print('Names should not have {:s}'.format(tmp_e))
if os.path.isfile(self.m_file_list[0]):
mes = "The above name seems not to be the data name. "
mes += "It seems to be a file path. "
mes += "\nPlease check test_list, trn_list, val_list."
nii_warn.f_print(mes, 'error')
self.m_file_list = tmp_new_list
break
else:
self.m_file_list = tmp_new_list
if len(self.m_file_list) < 1:
nii_warn.f_print("\nNo input features found after scanning",'error')
nii_warn.f_print("Please check %s" \
% (str(self.m_input_dirs)), 'error')
nii_warn.f_print("They should contain all files in file list",
'error')
nii_warn.f_print("Please also check filename extentions %s" \
% (str(self.m_input_exts)), 'error')
nii_warn.f_print("They should be correctly specified", 'error')
nii_warn.f_die("Failed to read input features")
# check output files if necessary
if self.m_output_dirs and not self.m_force_skip_scanning:
for tmp_d, tmp_e in zip(self.m_output_dirs, \
self.m_output_exts):
tmp_list = nii_list_tools.listdir_with_ext(tmp_d, tmp_e,
flag_recur)
self.m_file_list = nii_list_tools.common_members(
tmp_list, self.m_file_list)
if len(self.m_file_list) < 1:
nii_warn.f_print("\nNo output data found", 'error')
nii_warn.f_print("Please check %s" \
% (str(self.m_output_dirs)), 'error')
nii_warn.f_print("They should contain all files in file list",
'error')
nii_warn.f_print("Please also check filename extentions %s" \
% (str(self.m_output_exts)), 'error')
nii_warn.f_print("They should be correctly specified", 'error')
nii_warn.f_die("Failed to read output features")
else:
#nii_warn.f_print("Not loading output features")
pass
# done
return
def f_valid_len(self, t_1, t_2, min_length):
""" f_valid_time_steps(time_step1, time_step2, min_length)
When either t_1 > min_length or t_2 > min_length, check whether
time_step1 and time_step2 are too different
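        A worked example with hypothetical numbers:
            f_valid_len(16000, 15200, 80) -> True   (5% mismatch, accepted)
            f_valid_len(16000, 12000, 80) -> False  (25% mismatch, rejected)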
"""
if max(t_1, t_2) > min_length:
if (np.abs(t_1 - t_2) * 1.0 / t_1) > 0.1:
return False
return True
def f_check_specific_data(self, file_name):
""" check the data length of a specific file
"""
tmp_dirs = self.m_input_dirs.copy()
tmp_exts = self.m_input_exts.copy()
tmp_dims = self.m_input_dims.copy()
tmp_reso = self.m_input_reso.copy()
tmp_dirs.extend(self.m_output_dirs)
tmp_exts.extend(self.m_output_exts)
tmp_dims.extend(self.m_output_dims)
tmp_reso.extend(self.m_output_reso)
# loop over each input/output feature type
for t_dir, t_ext, t_dim, t_res in \
zip(tmp_dirs, tmp_exts, tmp_dims, tmp_reso):
file_path = nii_str_tk.f_realpath(t_dir, file_name, t_ext)
if not nii_io_tk.file_exist(file_path):
nii_warn.f_die("%s not found" % (file_path))
else:
t_len = self.f_length_data(file_path) // t_dim
print("%s, length %d, dim %d, reso: %d" % \
(file_path, t_len, t_dim, t_res))
return
def f_log_data_len(self, file_name, t_len, t_reso):
""" f_log_data_len(file_name, t_len, t_reso):
Log down the length of the data file.
When comparing the different input/output features for the same
file_name, only keep the shortest length
"""
# We need to exclude features that should not be considered when
# calculating the sequence length
# 1. sentence-level vector (t_len = 1)
# 2. unaligned feature (text in text-to-speech) (t_reso < 0)
valid_flag = t_len > 1 and t_reso > 0
if valid_flag:
            # the length for the sequence with the fastest temporal rate
# For example, acoustic-feature -> waveform 16kHz,
# if acoustic-feature is one frame per 5ms,
# tmp_len = acoustic feature frame length * (5 * 16)
# where t_reso = 5*16 is the up-sampling rate of acoustic feature
tmp_len = t_len * t_reso
# save length when have not read the file
if file_name not in self.m_data_length:
self.m_data_length[file_name] = tmp_len
# check length
if t_len == 1:
# cannot come here, keep this line as history
# if this is an utterance-level feature, it has only 1 frame
pass
elif self.f_valid_len(self.m_data_length[file_name], tmp_len, \
nii_dconf.data_seq_min_length):
# if the difference in length is small
if self.m_data_length[file_name] > tmp_len:
self.m_data_length[file_name] = tmp_len
else:
nii_warn.f_print("Sequence length mismatch:", 'error')
self.f_check_specific_data(file_name)
nii_warn.f_print("Please check the above features", 'error')
if self.m_ignore_length_invalid:
nii_warn.f_print("ignore-length-invalid-data is on")
nii_warn.f_print("ignore {:s}".format(file_name))
return False
else:
nii_warn.f_print("Or remove them from data list", 'error')
nii_warn.f_print("Or --ignore-length-invalid-data",'error')
nii_warn.f_die("Possible invalid data %s" % (file_name))
# adjust the length so that, when reso is used,
# the sequence length will be N * reso
tmp = self.m_data_length[file_name]
self.m_data_length[file_name] = self.f_adjust_len(tmp)
else:
# do nothing for unaligned input or sentence-level input
pass
return True
def f_adjust_len(self, length):
""" When input data will be up-sampled by self.m_single_reso,
Make sure that the sequence length at the up-sampled level is
= N * self.m_single_reso
For data without up-sampling m_single_reso = 1
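        For example (hypothetical numbers), with m_single_reso = 80 an input
        length of 16001 is adjusted to 16001 // 80 * 80 = 16000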
"""
return length // self.m_single_reso * self.m_single_reso
def f_precheck_data_length(self):
""" For unaligned input and output, there is no way to know the
        target sequence length beforehand during the inference stage
self.m_data_length will be empty
"""
if not self.m_data_length and not self.m_output_dirs and \
all([x < 0 for x in self.m_input_reso]):
# inference stage, when only input is given
# manually create a fake data length for each utterance
for file_name in self.m_file_list:
self.m_data_length[file_name] = 0
return
def f_log_seq_info(self):
""" After m_data_length has been created, create seq_info
"""
for file_name in self.m_file_list:
# if file_name is not logged, ignore this file
if file_name not in self.m_data_length:
nii_warn.f_eprint("Exclude %s from dataset" % (file_name))
continue
# if not truncate, save the seq_info directly
# otherwise, save truncate_seq info
length_remain = self.m_data_length[file_name]
start_pos = 0
seg_idx = 0
if self.m_truncate_seq is not None:
while(length_remain > 0):
info_idx = len(self.m_seq_info)
seg_length = min(self.m_truncate_seq, length_remain)
seq_info = nii_seqinfo.SeqInfo(seg_length,
file_name, seg_idx,
start_pos, info_idx)
if self.m_min_seq_len is None or \
seg_length >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
seg_idx += 1
start_pos += seg_length
length_remain -= seg_length
else:
info_idx = len(self.m_seq_info)
seq_info = nii_seqinfo.SeqInfo(length_remain,
file_name, seg_idx,
start_pos, info_idx)
if self.m_min_seq_len is None or \
length_remain >= self.m_min_seq_len:
self.m_seq_info.append(seq_info)
# get the total length
self.m_data_total_length = self.f_sum_data_length()
return
def f_init_mean_std(self, ms_input_path, ms_output_path):
""" f_init_mean_std
        Initialize mean and std vectors for input and output
"""
self.m_input_mean = np.zeros([self.m_input_all_dim])
self.m_input_std = np.ones([self.m_input_all_dim])
self.m_output_mean = np.zeros([self.m_output_all_dim])
self.m_output_std = np.ones([self.m_output_all_dim])
flag = True
if not self.m_save_ms:
# assume mean/std will be loaded from the network
# for example, for validation and test sets
flag = False
if not any(self.m_input_norm + self.m_output_norm):
# none of the input / output features needs norm
flag = False
if os.path.isfile(ms_input_path) and \
os.path.isfile(ms_output_path):
# load mean and std if exists
ms_input = self.f_load_data(ms_input_path, 1)
ms_output = self.f_load_data(ms_output_path, 1)
if ms_input.shape[0] != (self.m_input_all_dim * 2) or \
ms_output.shape[0] != (self.m_output_all_dim * 2):
if ms_input.shape[0] != (self.m_input_all_dim * 2):
nii_warn.f_print("%s incompatible" % (ms_input_path),
'warning')
if ms_output.shape[0] != (self.m_output_all_dim * 2):
nii_warn.f_print("%s incompatible" % (ms_output_path),
'warning')
nii_warn.f_print("mean/std will be recomputed", 'warning')
else:
self.m_input_mean = ms_input[0:self.m_input_all_dim]
self.m_input_std = ms_input[self.m_input_all_dim:]
self.m_output_mean = ms_output[0:self.m_output_all_dim]
self.m_output_std = ms_output[self.m_output_all_dim:]
nii_warn.f_print("Load mean/std from %s and %s" % \
(ms_input_path, ms_output_path))
flag = False
return flag
def f_sum_data_length(self):
"""
"""
return sum([x.seq_length() for x in self.m_seq_info])
def f_init_data_len_stats(self, data_path):
"""
flag = f_init_data_len_stats(self, data_path)
        Check whether data length has been stored in data_path.
If yes, load data_path and return False
Else, return True
"""
self.m_seq_info = []
self.m_data_length = {}
self.m_data_total_length = 0
flag = True
if os.path.isfile(data_path) and not self.m_ignore_cached_finfo:
# load data length from pre-stored *.dic
dic_seq_infos = nii_io_tk.read_dic(self.m_data_len_path)
for dic_seq_info in dic_seq_infos:
seq_info = nii_seqinfo.SeqInfo()
seq_info.load_from_dic(dic_seq_info)
self.m_seq_info.append(seq_info)
seq_tag = seq_info.seq_tag()
if seq_tag not in self.m_data_length:
self.m_data_length[seq_tag] = seq_info.seq_length()
else:
self.m_data_length[seq_tag] += seq_info.seq_length()
self.m_data_total_length = self.f_sum_data_length()
# check whether *.dic contains files in filelist
# note: one file is not found in self.m_data_length if it
# is shorter than the truncate_seq
if nii_list_tools.list_identical(self.m_file_list,\
self.m_data_length.keys()):
nii_warn.f_print("Read sequence info: %s" % (data_path))
flag = False
elif nii_list_tools.list_b_in_list_a(self.m_file_list,
self.m_data_length.keys()):
nii_warn.f_print("Read sequence info: %s" % (data_path))
nii_warn.f_print(
"However %d samples are ignoed" % \
(len(self.m_file_list)-len(self.m_data_length)))
tmp = nii_list_tools.members_in_a_not_in_b(
self.m_file_list, self.m_data_length.keys())
for tmp_name in tmp:
nii_warn.f_eprint("Exclude %s from dataset" % (tmp_name))
flag = False
else:
nii_warn.f_print("Incompatible cache: %s" % (data_path))
tmp = nii_list_tools.members_in_a_not_in_b(
self.m_data_length.keys(), self.m_file_list)
nii_warn.f_print("Possibly invalid data (a few examples):")
for tmp_name in tmp[:10]:
nii_warn.f_print(tmp_name)
nii_warn.f_print("...\nYou may carefully check these data.")
nii_warn.f_print("\nThey may not be in the provided data list.")
nii_warn.f_print("Re-read data statistics")
self.m_seq_info = []
self.m_data_length = {}
self.m_data_total_length = 0
        # check whether truncating length has been changed
if self.m_truncate_seq is not None and flag is False:
tmp_max_len = max([x.seq_length() for x in self.m_seq_info])
if tmp_max_len != self.m_truncate_seq:
mes = "WARNING: truncate_seq conflicts with cached infor. "
mes += "Please delete cache files *.dic if you want to"
mes += " use the new truncate_seq"
nii_warn.f_print(mes, "warning")
return flag
def f_save_data_len(self, data_len_path):
"""
"""
if not self.m_ignore_cached_finfo:
nii_io_tk.write_dic([x.print_to_dic() for x in self.m_seq_info], \
data_len_path)
return
def f_save_mean_std(self, ms_input_path, ms_output_path):
"""
"""
# save mean and std
ms_input = np.zeros([self.m_input_all_dim * 2])
ms_input[0:self.m_input_all_dim] = self.m_input_mean
ms_input[self.m_input_all_dim :] = self.m_input_std
self.f_write_data(ms_input, ms_input_path)
ms_output = np.zeros([self.m_output_all_dim * 2])
ms_output[0:self.m_output_all_dim] = self.m_output_mean
ms_output[self.m_output_all_dim :] = self.m_output_std
self.f_write_data(ms_output, ms_output_path)
return
def f_print_info(self):
"""
"""
mes = "Dataset {}:".format(self.m_set_name)
mes += "\n Time steps: {:d} ".format(self.m_data_total_length)
if self.m_truncate_seq is not None:
mes += "\n Truncate length: {:d}".format(self.m_truncate_seq)
mes += "\n Data sequence num: {:d}".format(len(self.m_seq_info))
tmp_min_len = min([x.seq_length() for x in self.m_seq_info])
tmp_max_len = max([x.seq_length() for x in self.m_seq_info])
mes += "\n Maximum sequence length: {:d}".format(tmp_max_len)
mes += "\n Minimum sequence length: {:d}".format(tmp_min_len)
if self.m_min_seq_len is not None:
mes += "\n Shorter sequences are ignored"
mes += "\n Inputs\n Dirs:"
for subdir in self.m_input_dirs:
mes += "\n {:s}".format(subdir)
mes += "\n Exts:{:s}".format(str(self.m_input_exts))
mes += "\n Dims:{:s}".format(str(self.m_input_dims))
mes += "\n Reso:{:s}".format(str(self.m_input_reso))
mes += "\n Norm:{:s}".format(str(self.m_input_norm))
mes += "\n Outputs\n Dirs:"
for subdir in self.m_output_dirs:
mes += "\n {:s}".format(subdir)
mes += "\n Exts:{:s}".format(str(self.m_output_exts))
mes += "\n Dims:{:s}".format(str(self.m_output_dims))
mes += "\n Reso:{:s}".format(str(self.m_output_reso))
mes += "\n Norm:{:s}".format(str(self.m_output_norm))
if self.m_opt_wav_handler > 0:
# wav handler
if len(self.m_input_exts) == 1 \
and self.m_input_exts[0][-3:] == 'wav':
mes += "\n Waveform silence handler will be used on input"
else:
mes += "\n Waveform silence handler NOT used on input"
if len(self.m_input_exts) > 1:
mes += "\t because multiple input features are used"
if len(self.m_output_exts) == 1 \
and self.m_output_exts[0][-3:] == 'wav':
mes += "\n Waveform silence handler will be used on output"
else:
mes += "\n Waveform silence handler NOT used on output"
if len(self.m_output_exts) > 1:
mes += "\t because multiple output features are used"
if self.m_inaug_funcs:
mes += "\n Use input feature transformation functions"
if len(self.m_input_exts) > 1:
mes += "\n Functions that change data length are ignored"
mes += "\n If it is intend to change data length, "
mes += "\n please use inoutput_augment_func"
if self.m_ouaug_funcs:
mes += "\n Use output feature transformation functions"
if len(self.m_output_exts) > 1:
mes += "\n Functions that change data length are ignored"
mes += "\n If it is intend to change data length, "
mes += "\n please use inoutput_augment_func"
if self.m_inouaug_func:
mes += "\n Use a unified function to alter input and output data"
if self.m_flag_reverse_load_order:
mes += "\n Reverse the data loading order from dataset "
nii_warn.f_print_message(mes)
return
def f_calculate_stats(self, flag_cal_data_len, flag_cal_mean_std):
""" f_calculate_stats
Log down the number of time steps for each file
Calculate the mean/std
"""
# check
#if not self.m_output_dirs:
# nii_warn.f_print("Calculating mean/std", 'error')
# nii_warn.f_die("But output_dirs is not provided")
# prepare the directory, extension, and dimensions
tmp_dirs = self.m_input_dirs.copy()
tmp_exts = self.m_input_exts.copy()
tmp_dims = self.m_input_dims.copy()
tmp_reso = self.m_input_reso.copy()
tmp_norm = self.m_input_norm.copy()
tmp_dirs.extend(self.m_output_dirs)
tmp_exts.extend(self.m_output_exts)
tmp_dims.extend(self.m_output_dims)
tmp_reso.extend(self.m_output_reso)
tmp_norm.extend(self.m_output_norm)
# starting dimension of one type of feature
s_dim = 0
# ending dimension of one type of feature
e_dim = 0
# print information
load_cnt = 0
total_cnt = len(tmp_dirs) * len(self.m_file_list)
# print progress
nii_warn.f_print("Get data statistis (may be slow due to data I/O)")
bar_len = 50
loading_marker = total_cnt // bar_len + 1
nii_warn.f_print("".join(['-' for x in range(bar_len-2)])+">|", 'plain')
# list of invalid data
invalid_data_lst = []
# loop over each input/output feature type
for t_dir, t_ext, t_dim, t_reso, t_norm in \
zip(tmp_dirs, tmp_exts, tmp_dims, tmp_reso, tmp_norm):
s_dim = e_dim
e_dim = s_dim + t_dim
t_cnt = 0
mean_i, var_i = np.zeros([t_dim]), np.zeros([t_dim])
# loop over all the data
for file_name in self.m_file_list:
load_cnt += 1
if load_cnt % loading_marker == 0:
nii_warn.f_print('>', end='', flush=True, opt='')
# get file path
file_path = nii_str_tk.f_realpath(t_dir, file_name, t_ext)
if not nii_io_tk.file_exist(file_path):
nii_warn.f_die("%s not found" % (file_path))
# read the length of the data
if flag_cal_data_len:
t_len = self.f_length_data(file_path) // t_dim
if not self.f_log_data_len(file_name, t_len, t_reso):
# this data is not valid, ignore it
# but it is OK to use it to compute mean/std
invalid_data_lst.append(file_name)
# accumulate the mean/std recursively
if flag_cal_mean_std:
t_data = self.f_load_data(file_path, t_dim)
                    # if this is F0 data, only consider voiced data
if t_ext in nii_dconf.f0_unvoiced_dic:
unvoiced_value = nii_dconf.f0_unvoiced_dic[t_ext]
t_data = t_data[t_data > unvoiced_value]
# mean_i, var_i, t_cnt will be updated using online
# accumulation method
mean_i, var_i, t_cnt = nii_stats.f_online_mean_std(
t_data, mean_i, var_i, t_cnt)
# save mean and std for one feature type
if flag_cal_mean_std:
# if not normalize this dimension, set mean=0, std=1
if not t_norm:
mean_i[:] = 0
var_i[:] = 1
if s_dim < self.m_input_all_dim:
self.m_input_mean[s_dim:e_dim] = mean_i
std_i = nii_stats.f_var2std(var_i)
self.m_input_std[s_dim:e_dim] = std_i
else:
tmp_s = s_dim - self.m_input_all_dim
tmp_e = e_dim - self.m_input_all_dim
self.m_output_mean[tmp_s:tmp_e] = mean_i
std_i = nii_stats.f_var2std(var_i)
self.m_output_std[tmp_s:tmp_e] = std_i
if flag_cal_data_len:
# remove invalid data (remove duplicated entries first)
invalid_data_lst = list(set(invalid_data_lst))
for tmp_file_name in invalid_data_lst:
self.m_data_length.pop(tmp_file_name)
#
self.f_precheck_data_length()
# create seq_info
self.f_log_seq_info()
# save len information
self.f_save_data_len(self.m_data_len_path)
if flag_cal_mean_std:
self.f_save_mean_std(self.m_ms_input_path,
self.m_ms_output_path)
nii_warn.f_print('')
# done
return
def f_putitem(self, output_data, save_dir, filename_prefix, data_infor_str):
"""
"""
# Change the dimension to (length, dim)
if output_data.ndim == 3 and output_data.shape[0] == 1:
# When input data is (batchsize=1, length, dim)
output_data = output_data[0]
elif output_data.ndim == 2 and output_data.shape[0] == 1:
# When input data is (batchsize=1, length)
output_data = np.expand_dims(output_data[0], -1)
else:
nii_warn.f_print("Output data format not supported.", "error")
nii_warn.f_print("Format is not (batch, len, dim)", "error")
nii_warn.f_die("Please use batch_size = 1 in generation")
# Save output
if output_data.shape[1] != self.m_output_all_dim:
nii_warn.f_print("Output data dim != expected dim", "error")
nii_warn.f_print("Output:%d" % (output_data.shape[1]), \
"error")
nii_warn.f_print("Expected:%d" % (self.m_output_all_dim), \
"error")
nii_warn.f_die("Please check configuration")
if not os.path.isdir(save_dir):
try:
os.makedirs(save_dir, exist_ok=True)
except OSError:
nii_warn.f_die("Cannot carete {}".format(save_dir))
# read the sentence information
tmp_seq_info = nii_seqinfo.SeqInfo()
tmp_seq_info.parse_from_str(data_infor_str)
# write the data
file_name = tmp_seq_info.seq_tag()
if len(filename_prefix):
file_name = filename_prefix + file_name
seq_length = tmp_seq_info.seq_length()
s_dim = 0
e_dim = 0
for t_ext, t_dim, t_reso in \
zip(self.m_output_exts, self.m_output_dims, self.m_output_reso):
e_dim = s_dim + t_dim
file_path = nii_str_tk.f_realpath(save_dir, file_name, t_ext)
# if this file_name contains part of the path, make sure that the
# parent folder has been created
if self.flag_filename_with_path:
tmp_save_dir = os.path.dirname(file_path)
if not os.path.isdir(tmp_save_dir):
try:
os.makedirs(tmp_save_dir, exist_ok=True)
except OSError:
nii_warn.f_die("Cannot carete {}".format(tmp_save_dir))
# check the length and write the data
if seq_length > 0:
expect_len = seq_length // t_reso
# confirm that the generated file length is as expected
if output_data.shape[0] < expect_len:
nii_warn.f_print("Warning {:s}".format(file_path), "error")
nii_warn.f_print("Generated data is shorter than expected")
nii_warn.f_print("Please check the generated file")
if s_dim == 0 and e_dim == output_data.shape[1]:
# if there is only one output feature, directly output it
self.f_write_data(output_data[:expect_len], file_path)
else:
                    # else, output the corresponding dimensions
self.f_write_data(output_data[:expect_len, s_dim:e_dim],
file_path)
elif seq_length == 0:
# if seq_length == 0, this is for unaligned input
if s_dim == 0 and e_dim == output_data.shape[1]:
self.f_write_data(output_data, file_path)
else:
self.f_write_data(output_data[s_dim:e_dim], file_path)
else:
nii_warn.f_die("Error: seq_length < 0 in generation")
return
def f_input_dim(self):
"""
f_input_dim()
return the total dimension of input features
"""
return self.m_input_all_dim
def f_output_dim(self):
"""
f_output_dim
return the total dimension of output features
"""
return self.m_output_all_dim
def f_adjust_idx(self, data_tuple, idx_shift):
"""
f_adjust_idx
This is to be used by customize_dataset for idx adjustment.
When multiple data sets are merged, the idx from __getitem__
should be adjusted.
        Only data_io itself knows how to identify idx from the output of
        __getitem__, so we need to define the function here
"""
if isinstance(data_tuple[-1], list) \
or isinstance(data_tuple[-1], torch.Tensor):
# if data_tuple has been collated
for idx in np.arange(len(data_tuple[-1])):
data_tuple[-1][idx] += idx_shift
else:
            # if data_tuple is from __getitem__()
data_tuple = (data_tuple[0], data_tuple[1],
data_tuple[2], data_tuple[-1] + idx_shift)
return data_tuple
def f_manage_data(self, idx, opt):
"""
        f_manage_data(self, idx, opt)
Args:
idx: list of int, list of data indices
opt: 'keep', keep only data in idx
'delete', delete data in idx
"""
if type(idx) is not list:
nii_warn.f_die("f_delete_seq(idx) expects idx to be list")
# get a new list of data for this database
if opt == 'delete':
# convert to set of int
idx_set = set([int(x) for x in idx])
tmp_idx = [x for x in range(self.__len__()) if x not in idx_set]
else:
tmp_idx = [int(x) for x in idx]
# keep the specified data indices
self.m_seq_info = [self.m_seq_info[x] for x in tmp_idx \
if x < self.__len__() and x >= 0]
# re-compute the total length of data
self.m_data_total_length = self.f_sum_data_length()
return
def f_get_seq_name_list(self):
""" return list of data of names in the dataset
"""
return [x.seq_tag() for x in self.m_seq_info]
def f_get_seq_info(self):
return [x.print_to_str() for x in self.m_seq_info]
def f_get_seq_idx_from_name(self, data_names):
""" return the data index given the data names
This function is not used so often.
"""
data_list = self.f_get_seq_name_list()
try:
return [data_list.index(x) for x in data_names]
except ValueError:
nii_warn.f_print("Not all data names are in this dataset")
nii_warn.f_print("Return []")
return []
class NIIDataSetLoader:
""" NIIDataSetLoader:
A wrapper over torch.utils.data.DataLoader and DataSet
self.m_dataset will be the dataset
self.m_loader will be the dataloader
"""
def __init__(self,
dataset_name, \
file_list, \
input_dirs, input_exts, input_dims, input_reso, \
input_norm, \
output_dirs, output_exts, output_dims, output_reso, \
output_norm, \
stats_path, \
data_format = nii_dconf.h_dtype_str, \
params = None, \
truncate_seq = None, \
min_seq_len = None,
save_mean_std = True, \
wav_samp_rate = None, \
flag_lang = 'EN',
global_arg = None,
dset_config = None,
input_augment_funcs = None,
output_augment_funcs = None,
inoutput_augment_func = None):
"""
NIIDataSetLoader(
data_set_name,
file_list,
input_dirs, input_exts, input_dims, input_reso, input_norm,
output_dirs, output_exts, output_dims, output_reso, output_norm,
stats_path,
data_format = '<f4',
params = None,
truncate_seq = None,
min_seq_len = None,
save_mean_std = True, \
wav_samp_rate = None, \
flag_lang = 'EN',
global_arg = None,
dset_config = None,
input_augment_funcs = None,
output_augment_funcs = None,
inoutput_augment_func = None):
Args
----
data_set_name: a string to name this dataset
this will be used to name the statistics files
such as the mean/std for this dataset
file_list: a list of file name strings (without extension)
or, path to the file that contains the file names
input_dirs: a list of dirs from which input feature is loaded
      input_exts: a list of input feature name extensions
input_dims: a list of input feature dimensions
input_reso: a list of input feature temporal resolution,
or None
input_norm: a list of bool, whether normalize input feature or not
output_dirs: a list of dirs from which output feature is loaded
      output_exts: a list of output feature name extensions
output_dims: a list of output feature dimensions
output_reso: a list of output feature temporal resolution,
or None
output_norm: a list of bool, whether normalize target feature or not
stats_path: path to the directory of statistics(mean/std)
data_format: method to load the data
           '<f4' (default): load data as float32, little-endian
'htk': load data as htk format
params: parameter for torch.utils.data.DataLoader
truncate_seq: None or int,
                   truncate data sequences into smaller chunks
                   truncate_seq > 0 specifies the chunk length
min_seq_len: None (default) or int, minimum length of an utterance
utterance shorter than min_seq_len will be ignored
save_mean_std: bool, True (default): save mean and std
wav_samp_rate: None (default) or int, if input data has waveform,
please set sampling rate. It is used by _data_writer
flag_lang: str, 'EN' (default), if input data has text, text will
be converted into code indices. flag_lang indicates the
                  language for the text processor, used by _data_reader
global_arg: argument parser returned by arg_parse.f_args_parsed()
default None
input_augment_funcs: list of functions for input data augmentation,
default None
output_augment_funcs: list of functions for output data augmentation
default None
inoutput_augment_func: a single data augmentation function
default None
Methods
-------
    get_loader(): return a torch.utils.data.DataLoader
    get_dataset(): return a torch.utils.data.Dataset
"""
nii_warn.f_print_w_date("Loading dataset %s" % (dataset_name),
level="h")
# create torch.util.data.DataSet
self.m_dataset = NIIDataSet(dataset_name, \
file_list, \
input_dirs, input_exts, \
input_dims, input_reso, \
input_norm, \
output_dirs, output_exts, \
output_dims, output_reso, \
output_norm, \
stats_path, data_format, \
truncate_seq, min_seq_len,\
save_mean_std, \
wav_samp_rate, \
flag_lang, \
global_arg,\
dset_config, \
input_augment_funcs,
output_augment_funcs,
inoutput_augment_func)
# create torch.util.data.DataLoader
if params is None:
tmp_params = nii_dconf.default_loader_conf
else:
tmp_params = params.copy()
# save parameters
self.m_params = tmp_params
# create data loader
self.m_loader = self.build_loader()
# done
return
def build_loader(self):
"""
"""
# initialize sampler if necessary
tmp_params = self.m_params.copy()
if 'sampler' in tmp_params:
tmp_sampler = None
if tmp_params['sampler'] == nii_sampler_fn.g_str_sampler_bsbl:
if 'batch_size' in tmp_params and tmp_params['batch_size']>1:
# initialize the sampler
tmp_sampler = nii_sampler_fn.SamplerBlockShuffleByLen(
self.m_dataset.f_get_seq_len_list(),
tmp_params['batch_size'])
# turn off automatic shuffle
tmp_params['shuffle'] = False
else:
nii_warn.f_print("{:s} off as batch-size is 1".format(
nii_sampler_fn.g_str_sampler_bsbl))
#nii_warn.f_die("Sampler requires batch size > 1")
tmp_params['sampler'] = tmp_sampler
# collate function
if 'batch_size' in tmp_params and tmp_params['batch_size'] > 1:
# for batch-size > 1, use customize_collate to handle
# data with different length
collate_fn = nii_collate_fn.customize_collate
else:
collate_fn = None
# return the loader
return torch.utils.data.DataLoader(
self.m_dataset, collate_fn=collate_fn, **tmp_params)
def get_loader_params(self):
return self.m_params
def get_loader(self):
""" get_loader():
Return the dataLoader (torch.util.data.DataLoader)
"""
return self.m_loader
def get_dataset(self):
""" get_dataset():
Return the dataset (torch.util.data.Dataset)
"""
return self.m_dataset
def get_data_mean_std(self):
"""
"""
return self.m_dataset.f_get_mean_std_tuple()
def print_info(self):
"""
"""
self.m_dataset.f_print_info()
print(str(self.m_params))
return
def get_seq_name_list(self):
return self.m_dataset.f_get_seq_name_list()
def get_seq_info(self):
return self.m_dataset.f_get_seq_info()
def get_seq_idx_from_name(self, data_names):
return self.m_dataset.f_get_seq_idx_from_name(data_names)
def putitem(self, output_data, save_dir, filename_prefix, data_infor_str):
""" Decompose the output_data from network into
separate files
"""
self.m_dataset.f_putitem(output_data, save_dir, filename_prefix,
data_infor_str)
def get_in_dim(self):
""" Return the dimension of input features
"""
return self.m_dataset.f_input_dim()
def get_out_dim(self):
""" Return the dimension of output features
"""
return self.m_dataset.f_output_dim()
def get_seq_num(self):
""" Return the number of sequences (after truncation)
"""
return self.m_dataset.f_get_num_seq()
def adjust_utt_idx(self, data_tuple, utt_idx_shift):
""" Return data tuple with adjusted utterance index in merged dataset
This is used by customize_dataset.
"""
return self.m_dataset.f_adjust_idx(data_tuple, utt_idx_shift)
def manage_data(self, data_idx, opt):
"""
manage_data(self, data_idx)
Args:
data_idx: list of indices, samples with these indices will be deleted
opt: 'keep', keep only data in idx
'delete', delete data in idx
"""
        # keep or delete the specified data entries in the dataset
        self.m_dataset.f_manage_data(data_idx, opt)
# rebuild dataloader
self.m_loader = self.build_loader()
return
def update_seq_len_in_sampler_sub(self, data_info):
"""
"""
data_idx = seq_info.parse_idx(one_info)
data_len = seq_info.parse_length(one_info)
self.m_dataset.f_update_seq_len_for_sampler_list(data_idx, data_len)
return
def update_seq_len_in_sampler(self):
"""update_seq_len()
Update sequence length if sequence length has been changed
(for example, during silence trim process)
This is necessary when using shuffle_by_seq_length sampler
and the sequences were trimmed in data augmentation function.
"""
# only useful for shuffle_by_seq_length sampler
if self.m_params['sampler'] == nii_sampler_fn.g_str_sampler_bsbl:
if hasattr(self.m_loader.sampler, 'update_seq_length'):
self.m_loader.sampler.update_seq_length(
self.m_dataset.f_get_updated_seq_len_for_sampler_list())
else:
print("Unknown error in update_seq_len_in_sampler")
sys.exit(1)
return
if __name__ == "__main__":
pass
| 79,969 | 42.39121 | 84 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/customize_sampler.py | #!/usr/bin/env python
"""
customized sampler
1. Block shuffler based on sequence length
Like BinnedLengthSampler in https://github.com/fatchord/WaveRNN
e.g., data length [1, 2, 3, 4, 5, 6] -> [3,1,2, 6,5,4] if block size =3
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import torch
import torch.utils.data
import torch.utils.data.sampler as torch_sampler
import core_scripts.math_tools.random_tools as nii_rand_tk
import core_scripts.other_tools.display as nii_warn
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
# name of the sampler
g_str_sampler_bsbl = 'block_shuffle_by_length'
###############################################
# Sampler definition
###############################################
class SamplerBlockShuffleByLen(torch_sampler.Sampler):
""" Sampler with block shuffle based on sequence length
e.g., data length [1, 2, 3, 4, 5, 6] -> [3,1,2, 6,5,4] if block size =3
"""
def __init__(self, buf_dataseq_length, batch_size):
""" SamplerBlockShuffleByLength(buf_dataseq_length, batch_size)
args
----
buf_dataseq_length: list or np.array of int,
length of each data in a dataset
batch_size: int, batch_size
"""
if batch_size == 1:
mes = "Sampler block shuffle by length requires batch-size>1"
nii_warn.f_die(mes)
        # hyper-parameter, just let block_size = batch_size * 4
self.m_block_size = batch_size * 4
# idx sorted based on sequence length
self.m_idx = np.argsort(buf_dataseq_length)
return
def __iter__(self):
""" Return a iterator to be iterated.
"""
tmp_list = list(self.m_idx.copy())
# shuffle within each block
# e.g., [1,2,3,4,5,6], block_size=3 -> [3,1,2,5,4,6]
nii_rand_tk.f_shuffle_in_block_inplace(tmp_list, self.m_block_size)
# shuffle blocks
# e.g., [3,1,2,5,4,6], block_size=3 -> [5,4,6,3,1,2]
nii_rand_tk.f_shuffle_blocks_inplace(tmp_list, self.m_block_size)
        # return an iterator; a list is iterable but not an iterator
# https://www.programiz.com/python-programming/iterator
return iter(tmp_list)
def __len__(self):
""" Sampler requires __len__
https://pytorch.org/docs/stable/data.html#torch.utils.data.Sampler
"""
return len(self.m_idx)
def update_seq_length(self, buf_dataseq_length):
"""Update sequence length if necessary
This will resort the sequences based on updated sequence length
"""
if len(buf_dataseq_length) == len(self.m_idx):
self.m_idx = np.argsort(buf_dataseq_length)
else:
print("Incompatible sequence length input: updata_seq_length")
sys.exit(1)
return
if __name__ == "__main__":
print("Definition of customized_sampler")
| 2,994 | 30.861702 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/dsp_tools.py | #!/usr/bin/env python
"""
dsp_tools
Interface to process waveforms with DSP tools
Note that functions here are based on numpy, and they are intended to be used
before data are converted into torch tensors.
data on disk -> DataSet.__getitem__() -----> Collate ----> Pytorch model
numpy.tensor torch.tensor
These functions don't work on pytorch tensors
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import scipy
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
class Melspec(object):
"""Melspec
A simple class to produce and invert mel-spectrogram
    Note that this is not compatible with librosa.melspec
Most of the API is written by Dr. Shinji Takaki
"""
def __init__(self, sf=16000, fl=400, fs=80, fftl=1024, mfbsize=80,
melmin=0, melmax=None, ver=1):
"""Melspec(sf, fl, fs, fftl, mfbsize, melmin, melmax)
Args
----
sf: int, sampling rate
fl: int, frame length (number of waveform points)
fs: int, frame shift
fftl: int, FFT points
mfbsize: int, mel-filter bank size
melmin: float, lowest freq. covered by mel-filter bank, default 0
melmax: float, highest freq. covered by mel-filter bank, default sf/2
Note
----
configuration for Voiceprivacy challenge:
dsp_tools.Melspec(fftl=1024, fl=400, fs=160, ver=2)
"""
#
self.ver = ver
# sampling rate
self.sf = sf
# frame length
self.fl = fl
# frame shift
self.fs = fs
# fft length
self.fftl = fftl
# mfbsize
self.mfbsize = mfbsize
# mel.min frequency (in Hz)
self.melmin = melmin
# mel.max frequency (in Hz)
if melmax is None:
self.melmax = sf/2
else:
self.melmax = melmax
# windows
self.window = np.square(np.blackman(self.fl).astype(np.float32))
winpower = np.sqrt(np.sum(self.window))
if self.ver == 2:
self.window = np.blackman(self.fl).astype(np.float32) / winpower
else:
self.window = self.window / winpower
# create mel-filter bank
self.melfb = self._melfbank(self.melmin, self.melmax)
# eps = 1.0E-12
self.eps = 1.0E-12
return
def _freq2mel(self, freq):
return 1127.01048 * np.log(freq / 700.0 + 1.0)
def _mel2freq(self, mel):
return (np.exp(mel / 1127.01048) - 1.0) * 700.0
def _melfbank(self, melmin, melmax):
linear_freq = 1000.0
mfbsize = self.mfbsize - 1
bFreq = np.linspace(0, self.sf / 2.0, self.fftl//2 + 1,
dtype=np.float32)
minMel = self._freq2mel(melmin)
maxMel = self._freq2mel(melmax)
iFreq = self._mel2freq(np.linspace(minMel, maxMel, mfbsize + 2,
dtype=np.float32))
linear_dim = np.where(iFreq<linear_freq)[0].size
iFreq[:linear_dim+1] = np.linspace(iFreq[0], iFreq[linear_dim],
linear_dim+1)
diff = np.diff(iFreq)
so = np.subtract.outer(iFreq, bFreq)
lower = -so[:mfbsize] / np.expand_dims(diff[:mfbsize], 1)
upper = so[2:] / np.expand_dims(diff[1:], 1)
fb = np.maximum(0, np.minimum(lower, upper))
enorm = 2.0 / (iFreq[2:mfbsize+2] - iFreq[:mfbsize])
fb *= enorm[:, np.newaxis]
fb0 = np.hstack([np.array(2.0*(self.fftl//2)/self.sf, np.float32),
np.zeros(self.fftl//2, np.float32)])
fb = np.vstack([fb0, fb])
return fb
def _melfbank_pinv(self, melfb):
"""get the pseudo inverse of melfb
"""
return
def _frame(self, X):
"""framing
"""
X = np.concatenate([np.zeros(self.fl//2, np.float32), X,
np.zeros(self.fl//2, np.float32)])
frame_num = (X.shape[0] - self.fl) // self.fs + 1
F = np.zeros([frame_num, self.fl])
for frame_idx in np.arange(frame_num):
F[frame_idx, :] = X[frame_idx*self.fs : frame_idx*self.fs+self.fl]
return F
def _anawindow(self, F):
W = F * self.window
return W
def _rfft(self, W):
Y = np.fft.rfft(W, n=self.fftl).astype(np.complex64)
return Y
def _amplitude(self, Y):
A = np.fmax(np.absolute(Y), self.eps)
return A
def _logmelfbspec(self, A):
M = np.log(np.dot(A, self.melfb.T))
return M
def _preprocess(self, X):
if self.ver == 2:
# in ver2, assume wave in 16 bits
return X * np.power(2, 15)
else:
return X
def analyze(self, X):
"""Mel = analysze(X)
input: X, np.array, waveform data, (length, )
output: Mel, np.array, melspec., (frame_length, melfb_size)
"""
X = self._preprocess(X)
M = self._amplitude(self._rfft(self._anawindow(self._frame(X))))
M = self._logmelfbspec(M)
return M
class LPClite(object):
""" A lite LPC analyzer & synthesizr
Note that this is based on numpy, not Pytorch
It can be used for pre-processing when loading data, or use
it as data transformation function
(see message at top)
Example:
# load waveform
sr, wav = wav_tools.waveReadAsFloat(wav_path)
m_lpc = LPClite(320, 80)
# LPC analysis
lpc_coef, _, rc, gain, err, err_overlapped = m_lpc.analysis(wav)
# LPC synthesis
wav_re = m_lpc.synthesis(lpc_coef, err, gain)
# rc to LPC
lpc_coef_tmp = m_lpc._rc2lpc(lpc_coef)
np.std(lpc_coef_tmp - lpc_coef)
"""
def __init__(self, fl=320, fs=80, order=29, window='blackman',
flag_emph=True, emph_coef=0.97):
"""LPClite(fl=320, fs=80, order=30, window='blackman')
Args
----
fl: int, frame length
fs: int, frame shift
order: int, order of LPC, [1, a_1, a_2, ..., a_order]
window: str, 'blackman' or 'hanning'
flag_emph: bool, whether use pre-emphasis (default True)
        emph_coef: float, coefficient for pre-emphasis filter (default 0.97)
Note that LPC model is defined as:
           1                           Gain
        --------   ---------------------------------------------
        1- bz^-1   a_0 + a_1 z^-1 + ... + a_order z^-(order)
b = emph_coef if flag_emph is True
b = 0 otherwise
"""
self.fl = fl
self.fs = fs
#
self.order = order
self.flag_emph = flag_emph
self.emph_coef = emph_coef
if np.abs(emph_coef) >= 1.0:
print("Warning: emphasis coef {:f} set to 0.97".format(emph_coef))
self.emph_coef = 0.97
if window == 'hanning':
self.win = np.hanning(self.fl)
else:
self.win = np.blackman(self.fl)
return
def analysis(self, wav):
"""lpc_coef, ld_err, gamma, gain, framed_err, err_signal = analysis(wav)
LPC analysis on each frame
input
-----
wav: np.array, (length, 1)
output
------
lpc_coef: np.array, LPC coeff, (frame_num, lpc_order + 1)
ld_err: np.array, LD analysis error, (frame_num, lpc_order + 1)
gamma: np.array, reflection coefficients, (frame_num,lpc_order)
gain: np.array, gain, (frame_num, 1)
framed_err: np.array, LPC error per frame, (frame_num, frame_length)
      err_signal: np.array, overlap-added excitation (length, 1)
      Note that framed_err is the excitation signal from LPC analysis on each
      frame. err_signal is the overlap-added excitation signal.
"""
if self.flag_emph:
wav_tmp = self._preemphasis(wav)
else:
wav_tmp = wav
# framing & windowing
frame_wined = self._windowing(self._framing(wav_tmp[:, 0]))
# auto-correlation
auto = self._auto_correlation(frame_wined)
# LD analysis
lpc_coef, lpc_err, gamma_array, gain = self._levison_durbin(auto)
# get LPC excitation signals in each frame
framed_err = self._lpc_analysis_core(lpc_coef, frame_wined, gain)
# overlap-add for excitation signal
err_signal = self._overlapadd(framed_err)
return lpc_coef, lpc_err, gamma_array, gain, framed_err, err_signal
def synthesis(self, lpc_coef, framed_err, gain):
"""wav = synthesis(lpc_coef, framed_err, gain):
LPC synthesis (and overlap-add)
input
-----
lpc_coef: np.array, LPC coeff, (frame_num, lpc_order + 1)
framed_err: np.array, LPC excitations, (frame_num, frame_length)
gain: np.array, LPC gain, (frame_num, 1)
output
------
wav: np.array, (length, 1)
This function does LPC synthesis in each frame and create
the output waveform by overlap-adding
"""
framed_x = self._lpc_synthesis_core(lpc_coef, framed_err, gain)
wav_tmp = self._overlapadd(framed_x)
if self.flag_emph:
wav_tmp = self._deemphasis(wav_tmp)
return wav_tmp
def _preemphasis(self, wav):
""" wav_out = _preemphasis(wav)
input
-----
wav: np.array, (length)
output
------
wav: np.array, (length)
"""
wav_out = np.zeros_like(wav) + wav
wav_out[1:] = wav_out[1:] - wav_out[0:-1] * self.emph_coef
return wav_out
def _deemphasis(self, wav):
""" wav_out = _deemphasis(wav)
input
-----
wav: np.array, (length)
output
------
wav: np.array, (length)
"""
wav_out = np.zeros_like(wav) + wav
for idx in range(1, wav.shape[0]):
wav_out[idx] = wav_out[idx] + wav_out[idx-1] * self.emph_coef
return wav_out
def _framing(self, wav):
"""F = _framed(wav)
Framing the signal
input
-----
wav: np.array, (length)
output
------
F: np.array, (frame_num, frame_length)
"""
frame_num = (wav.shape[0] - self.fl) // self.fs + 1
F = np.zeros([frame_num, self.fl], dtype=wav.dtype)
for frame_idx in np.arange(frame_num):
F[frame_idx, :] = wav[frame_idx*self.fs : frame_idx*self.fs+self.fl]
return F
def _windowing(self, framed_x):
"""windowing
"""
return framed_x * self.win
def _overlapadd(self, framed_x):
"""wav = _overlapadd(framed_x)
Do overlap-add on framed (and windowed) signal
input
-----
framed_x: np.array, (frame_num, frame_length)
output
------
wav: np.array, (length, 1)
length = (frame_num - 1) * frame_shift + frame_length
"""
# waveform length
wavlen = (framed_x.shape[0] - 1) * self.fs + self.fl
wavbuf = np.zeros([wavlen])
# buf to save overlapped windows (to normalize the signal amplitude)
protobuf = np.zeros([wavlen])
win_prototype = self._windowing(self._framing(np.ones_like(protobuf)))
# overlap and add
for idx in range(framed_x.shape[0]):
frame_s = idx * self.fs
wavbuf[frame_s : frame_s + self.fl] += framed_x[idx]
protobuf[frame_s : frame_s + self.fl] += win_prototype[idx]
# remove the impact of overlapped windows
#protobuf[protobuf<1e-05] = 1.0
wavbuf = wavbuf / protobuf.mean()
return np.expand_dims(wavbuf, axis=1)
def _lpc_analysis_core(self, lpc_coef, framed_x, gain):
"""framed_err = _lpc_analysis_core(lpc_coef, framed_x, gain)
LPC analysis on frame
MA filtering: e[n] = \sum_k=0 a_k x[n-k] / gain
input
-----
lpc_coef: np.array, (frame_num, order + 1)
framed_x: np.array, (frame_num, frame_length)
gain: np.array, (frame_num, 1)
output
------
framed_err: np.array, (frame_num, frame_length)
Note that lpc_coef[n, :] = (1, a_1, a_2, ..., a_order) for n-th frame
framed_x[n, :] = (x[0], x[1], ..., x[frame_len]) for n-th frame
"""
#
frame_num = framed_x.shape[0]
frame_len = framed_x.shape[1]
# lpc order (without the a_0 term)
order = lpc_coef.shape[1] - 1
# pad zero, every frame has [0, ..., 0, x[0], x[1], ..., x[frame_len]]
tmp_framed = np.concatenate(
[np.zeros([frame_num, order + 1]), framed_x], axis=1)
# flip to (x[frame_len], ... x[1], x[0], 0, ..., 0)
tmp_framed = tmp_framed[:, ::-1]
# LPC excitation buffer
framed_err = np.zeros_like(framed_x)
# e[n] = \sum_k=0 a[k] x[n-k]
# do this for all frames and n simultaneously
for k in range(self.order + 1):
# a[k]
tmp_coef = lpc_coef[:, k:k+1]
# For each frame
# RHS = [x[n-k], x[n-k-1], ..., ] * a[k]
#
# By doing this for k in [0, order]
# LHS = [e[n], e[n-1], ...]
# [x[n-0], x[n-0-1], ..., ] * a[0]
# + [x[n-1], x[n-1-1], ..., ] * a[1]
# + [x[n-2], x[n-2-1], ..., ] * a[2]
# + ...
# We get the excitation for one frame
# This process is conducted for all frames at the same time
framed_err += tmp_framed[:, 0:frame_len] * tmp_coef
# roll to [x[n-k-1], x[n-k-2], ..., ]
tmp_framed = np.roll(tmp_framed, -1, axis=1)
        # reverse to (e[0], e[1], ..., e[frame_len])
return framed_err[:, ::-1] / gain
def _lpc_synthesis_core(self, lpc_coef, framed_err, gain):
"""framed_x = _lpc_synthesis_core(lpc_coef, framed_err, gain)
AR filtering: x[n] = gain * e[n] - \sum_k=0 a_k x[n-k]
LPC synthesis on frame
input
-----
lpc_coef: np.array, (frame_num, order + 1)
framed_err: np.array, (frame_num, frame_length)
gain: np.array, (frame_num, 1)
output
------
framed_x: np.array, (frame_num, frame_length)
Note that
lpc_coef[n, :] = (1, a_1, a_2, ..., a_order), for n-th frame
framed_x[n, :] = (x[0], x[1], ..., x[frame_len]), for n-th frame
"""
frame_num = framed_err.shape[0]
frame_len = framed_err.shape[1]
order = lpc_coef.shape[1] - 1
# pad zero
# the buffer looks like
# [[0, 0, 0, 0, 0, ... x[0], x[1], x[frame_length -1]], -> 1st frame
# [0, 0, 0, 0, 0, ... x[0], x[1], x[frame_length -1]], -> 2nd frame
# ...]
framed_x = np.concatenate(
[np.zeros([frame_num, order]), np.zeros_like(framed_err)], axis=1)
        # flip the coefficients of each frame as [a_order, ..., a_1, 1]
lpc_coef_tmp = lpc_coef[:, ::-1]
        # synthesis (all frames are done at the same time)
for idx in range(frame_len):
# idx+order so that it points to the shifted time idx
# idx+order
# [0, 0, 0, 0, 0, ... x[0], x[1], ... x[idx], ]
# gain * e[n]
framed_x[:, idx+order] = framed_err[:, idx] * gain[:, 0]
# [x[idx-1-order], ..., x[idx-1]] * [a_order, a_1]
pred = np.sum(framed_x[:, idx:idx+order] * lpc_coef_tmp[:, :-1],
axis=1)
# gain * e[n] - [x[idx-1-order], ..., x[idx-1]] * [a_order, a_1]
framed_x[:, idx+order] = framed_x[:, idx+order] - pred
# [0, 0, 0, 0, 0, ... x[0], x[1], ... ] -> [x[0], x[1], ...]
return framed_x[:, order:]
def _auto_correlation(self, framed_x):
""" autocorr = _auto_correlation(framed_x)
input
-----
framed_x: np.array, (frame_num, frame_length), frame windowed signal
output
------
autocorr: np.array, auto-correlation coeff (frame_num, lpc_order+1)
"""
# (frame_num, order)
autocor = np.zeros([framed_x.shape[0], self.order+1])
# loop and compute auto-corr (for all frames simultaneously)
for i in np.arange(self.order+1):
autocor[:, i] = np.sum(
framed_x[:, 0:self.fl-i] * framed_x[:, i:],
axis=1)
#print(autocor[0, i])
#autocor[:, i] = 0
#for idx in np.arange(self.fl):
# if (idx + i) < self.fl:
# autocor[:, i] += framed_x[:, idx] * framed_x[:, idx + i]
# else:
# break
#print(autocor[0, i])
# (frame_num, order)
return autocor
def _levison_durbin(self, autocor):
"""lpc_coef_ou, lpc_err, gamma_array, gain = _levison_durbin(autocor)
        Levinson-Durbin recursion
input
-----
autocor: np.array, auto-correlation, (frame_num, lpc_order+1)
output
------
lpc_coef: np.array, LPC coefficients, (frame_num, lpc_order+1)
lpc_err: np.array, LPC error, (frame_num, lpc_order+1)
gamma: np.array, reflection coeff, (frame_num, lpc_order)
gain: np.array, gain, (frame_num, 1)
        Note that lpc_coef[n] = (1, a_1, ..., a_order) for n-th frame
"""
# (frame_num, order)
frame_num, order = autocor.shape
order = order - 1
polyOrder = order + 1
# to log down the invalid frames
tmp_order = np.zeros([frame_num], dtype=np.int32) + polyOrder
lpc_coef = np.zeros([frame_num, 2, polyOrder])
lpc_err = np.zeros([frame_num, polyOrder])
gamma_array = np.zeros([frame_num, order])
gain = np.zeros([frame_num])
lpc_err[:, 0] = autocor[:, 0]
lpc_coef[:, 0, 0] = 1.0
for index in np.arange(1, polyOrder):
lpc_coef[:, 1, index] = 1.0
# compute gamma
# step1.
gamma = np.sum(lpc_coef[:, 0, 0:(index)] * autocor[:, 1:(index+1)],
axis=1)
# step2. check validity of lpc_err
ill_idx = lpc_err[:,index-1] < 1e-07
# also frames that should have been stopped in previous iter
ill_idx = np.bitwise_or(ill_idx, tmp_order < polyOrder)
# step3. make invalid frame gamma=0
gamma[ill_idx] = 0
gamma[~ill_idx] = gamma[~ill_idx] / lpc_err[~ill_idx,index-1]
gamma_array[:, index-1] = gamma
# step4. log down the ill frames
tmp_order[ill_idx] = index
lpc_coef[:, 1, 0] = -1.0 * gamma
if index > 1:
lpc_coef[:, 1, 1:index] = lpc_coef[:, 0, 0:index-1] \
+ lpc_coef[:, 1, 0:1] * lpc_coef[:, 0, 0:index-1][:, ::-1]
lpc_err[:, index] = lpc_err[:, index-1] * (1 - gamma * gamma)
lpc_coef[:, 0, :] = lpc_coef[:, 1, :]
# flip to (1, a_1, ..., a_order)
lpc_coef = lpc_coef[:, 0, ::-1]
# output LPC coefficients
lpc_coef_ou = np.zeros([frame_num, polyOrder])
# if high-order LPC analysis is not working
# each frame may require a different truncation length
for idx in range(frame_num):
lpc_coef_ou[idx, 0:tmp_order[idx]] = lpc_coef[idx, 0:tmp_order[idx]]
# get the gain, when tmp_order = polyOrder, tmp_order-2 -> order-1,
# last element of the lpc_err buffer
gain = np.sqrt(lpc_err[np.arange(len(tmp_order)), tmp_order-2])
        # if the gain is (near) zero, the analysis error is zero;
        # set gain to 1.0 to avoid dividing by zero later
gain[gain < 1e-07] = 1.0
# (frame_num, order)
return lpc_coef_ou, lpc_err, gamma_array, np.expand_dims(gain, axis=1)
def _rc2lpc(self, rc):
"""lpc_coef = _rc2lpc(rc)
from reflection coefficients to LPC coefficients
forward Levinson recursion
input
-----
rc: np.array, (frame_num, lpc_order)
output
------
lpc_coef, np.array, (frame_num, lpc_order+1)
Note that LPC model is defined as:
Gain
---------------------------------------------
a_0 + a_1 z^-1 + ... + a_order z^-(order)
    Thus, the input reflection coefficients are [gamma_1, ..., gamma_order],
    and the returned LPC coefficients are [1, a_1, ..., a_order]
"""
# (frame_num, order)
frame_num, order = rc.shape
polyOrder = order + 1
lpc_coef = np.zeros([frame_num, 2, polyOrder])
lpc_coef[:, 0, 0] = 1.0
for index in np.arange(1, polyOrder):
lpc_coef[:, 1, index] = 1.0
gamma = rc[:, index-1]
lpc_coef[:, 1, 0] = -1.0 * gamma
if index > 1:
lpc_coef[:, 1, 1:index] = lpc_coef[:, 0, 0:index-1] \
+ lpc_coef[:, 1, 0:1] * lpc_coef[:, 0, 0:index-1][:, ::-1]
lpc_coef[:, 0, :] = lpc_coef[:, 1,:]
lpc_coef = lpc_coef[:, 0, ::-1]
return lpc_coef
def f0resize(input_f0, input_reso, output_reso):
"""output_f0 = f0size(input_f0, input_reso, output_reso)
input
-----
input_f0: array, (length, )
input_reso: int, frame_shift, ms
output_reso: int, frame_shift, ms
output
------
output_f0: array, (length2, )
where length2 ~ np.ceil(length * input_reso / output_reso)
"""
# function to merge two f0 value
# average them unless there is u/v mismatch
def merge_f0(val1, val2):
if val1 < 1 and val2 < 1:
return (val1 + val2)/2
elif val1 < 1:
return val2
elif val2 < 1:
return val1
else:
return (val1 + val2)/2
def retrieve_f0(buf, idx):
        if idx >= 0 and idx < buf.shape[0]:
return buf[idx]
else:
return 0
# input length
input_len = input_f0.shape[0]
# output length
output_len = int(np.ceil(input_len * input_reso / output_reso))
# output buffer
output_f0 = np.zeros([output_len])
for idx in np.arange(output_len):
input_idx = idx * output_reso / input_reso
input_idx_left = int(np.floor(input_idx))
input_idx_right = int(np.ceil(input_idx))
# get the nearest value from input f0
val1 = retrieve_f0(input_f0, input_idx_left)
val2 = retrieve_f0(input_f0, input_idx_right)
output_f0[idx] = merge_f0(val1, val2)
return output_f0
def spectra_substraction(input1, input2, ratio=0.1,
frame_length = 512, frame_shift = 256, fft_n = 512):
"""
output = spectra_substraction(input1, input2, ratio=0.1,
frame_length = 512, frame_shift = 256, fft_n = 512)
input
-----
input1: array, (length1 ), waveform to be denoised
input2: array, (length2 ), waveform background noise
ratio: float, weight to average spectra of noise
frame_length, frame_shift, fft_n
output
------
output: array, (length 1)
"""
_, _, input_spec1 = scipy.signal.stft(
input1, nperseg = frame_length,
noverlap = frame_length - frame_shift, nfft=fft_n)
_, _, input_spec2 = scipy.signal.stft(
        input2, nperseg = frame_length,
noverlap = frame_length - frame_shift, nfft=fft_n)
# ampltiude and phase
amp1 = np.abs(input_spec1)
pha1 = np.angle(input_spec1)
    # noise average spectrum
amp2 = np.abs(input_spec2)
amp2 = amp2.mean(axis=1, keepdims=1)
#idx = np.bitwise_and(amp1 > 0.0000001, amp2 > 0.0000001)
#amp_new = amp1
#amp_new[idx] = np.exp(np.log(amp1[idx]) - np.log((amp2[idx] * ratio)))
    # spectral subtraction
amp_new = amp1 - amp2 * ratio
    # keep amplitude non-negative
amp_new[amp_new<0] = 0.0
# reconstruct
spec_new = amp_new * np.cos(pha1) + 1j * amp_new * np.sin(pha1)
_, output = scipy.signal.istft(
spec_new, nperseg=frame_length,
noverlap=frame_length - frame_shift, nfft = fft_n)
return output
def GriffinLim(sp_amp, n_iter, fl, fs, fft_n,
window='hann', momentum=0.99, init='rand'):
"""
wav = GriffinLim(sp_amp, n_iter, fl, fs, fft_n,
window='hann', momentum=0.99, init='rand')
Code based on librosa API.
input
-----
sp_amp: array, (frame, fft_n//2+1), spectrum amplitude (linear domain)
n_iter: int, number of GL iterations
fl: int, frame length
fs: int, frame shift
fft_n: int, number of FFT points,
window: str, default hann window
momentum: float, momentum for fast GL iteration default 0.99
init: str, initialization method of initial phase, default rand
output
------
wav: array, (length, ), reconstructed waveform
Example
-------
nfft = 512
fl = 512
fs = 256
_, _, data_stft = scipy.signal.stft(data1, window='hann', nperseg=fl,
noverlap=fl - fs, nfft = nfft)
data_stft = np.abs(data_stft)
wav = GriffinLim(data_stft, 32, fl, fs, nfft)
"""
def angle_to_complex(x):
return np.cos(x) + 1j * np.sin(x)
# check data shape
if sp_amp.shape[0] != fft_n // 2 + 1:
spec_amp = sp_amp.T
if spec_amp.shape[0] != fft_n // 2 + 1:
print("Input sp_amp has shape {:s}".format(str(sp_amp)))
print("FFT bin number is {:d}, incompatible".format(fft_n))
else:
spec_amp = sp_amp
# small value
eps = 0.0000001
# buffer for angles
angles = np.zeros(spec_amp.shape, dtype=np.complex64)
# initialize phase
if init == "rand":
angles[:] = angle_to_complex(2*np.pi * np.random.rand(*spec_amp.shape))
else:
angles[:] = 1.0
# Place-holders for temporary data and reconstructed buffer
rebuilt = None
tprev = None
inverse = None
# Absorb magnitudes into angles
angles *= spec_amp
for _ in range(n_iter):
# Invert
_, inverse = scipy.signal.istft(angles, window = window,
nperseg=fl, noverlap=fl - fs, nfft = fft_n)
# rebuild
_, _, rebuilt = scipy.signal.stft(inverse, window = window,
nperseg=fl, noverlap=fl - fs, nfft = fft_n)
# update
angles[:] = rebuilt
if tprev is not None:
angles -= (momentum / (1 + momentum)) * tprev
angles /= np.abs(angles) + eps
angles *= spec_amp
#
rebuilt, tprev = tprev, rebuilt
# reconstruct
_, wav = scipy.signal.istft(angles, window = window,
nperseg=fl, noverlap=fl - fs, nfft = fft_n)
return wav
def warp_function_bilinear(normed_freq, alpha=0):
""" warped_freq = warp_function_quadratic(normed_freq)
Frequency warping using bi-linear function
input
-----
normed_freq: np.array, (N, ), normalized frequency values
between 0 and pi
alpha: float, warping coefficient. alpha=0 means no warping
output
------
    warped_freq: np.array, (N, ), warped normalized frequency
Example
-------
orig_rad = np.arange(0, 512)/512 * np.pi
warp_rad = warp_function_bilinear(orig_rad, alpha=0.3)
"""
if np.any(normed_freq < 0) or np.any(normed_freq > np.pi):
print("Input freq. out of range")
sys.exit(1)
nom = (1 - alpha * alpha) * np.sin(normed_freq)
den = (1 + alpha * alpha) * np.cos(normed_freq) - 2 * alpha
output = np.arctan(nom / den)
output[output < 0] = output[output < 0] + np.pi
return output
def warp_interpolation(spec, alpha, warp_func=None):
"""output = wrap_interpolation(spec, spec)
Do frequency Warping and linear interpolation of spectrum.
This is used for Vocal-tract pertubation
input
-----
    spec: spectral envelope, (L, N), where L is the frame number
alpha: float, coefficients for warping
warp_func: a warp function,
if None, we will use warp_function_bilinear in dsp_tools.py
output
------
    output: spectral envelope, (L, N), where L is the frame number
Example
-------
    # let us do vocal-tract length perturbation
# let's do warping on spectrum envelope
# we use pyworld to extract spectrum envelope
import pyworld as pw
x, sf = some_waveread_function(audio_file)
# World analysis
_f0, t = pw.dio(x, sf) # raw pitch extractor
f0 = pw.stonemask(x, _f0, t, sf) # pitch refinement
sp = pw.cheaptrick(x, f0, t, sf) # extract smoothed spectrogram
ap = pw.d4c(x, f0, t, sf) # extract aperiodicity
# Synthesis without warpping
y = pw.synthesize(f0, sp, ap, sf)
# Synthesis after warpping
alpha = 0.1
    sp_wrapped = warp_interpolation(sp, alpha, warp_function_bilinear)
    ap_wrapped = warp_interpolation(ap, alpha, warp_function_bilinear)
y_wrapped = pw.synthesize(f0, sp_wrapped, ap_wrapped, sf)
# please listen and compare y and y_wrapped
"""
    # use the default warping function when none is given (see docstring)
    if warp_func is None:
        warp_func = warp_function_bilinear
    nbins = spec.shape[1]
    orig_rad = np.arange(0, nbins) / nbins * np.pi
    warp_rad = warp_func(orig_rad, alpha=alpha)
if np.mean(np.abs(warp_rad - orig_rad)) < 0.0001:
return spec
else:
output = np.zeros_like(spec)
for rad_idx in np.arange(nbins):
warp = warp_rad[rad_idx]
warp_idx = warp / np.pi * nbins
idx_left = int(np.floor(warp_idx))
idx_right = int(np.ceil(warp_idx))
if idx_left < 0:
idx_left = 0
if idx_right >= nbins:
idx_right = nbins - 1
if idx_left == idx_right:
w_l, w_r = 0.0, 1.0
else:
                # linear interpolation weights: the closer the warped index
                # is to a bin, the larger that bin's weight
                w_l = idx_right - warp_idx
                w_r = warp_idx - idx_left
# weighted sum for interpolation
output[:,rad_idx] = spec[:,idx_left] * w_l + spec[:,idx_right] * w_r
return output
if __name__ == "__main__":
print("DSP tools using numpy")
    # Example for doing LPC analysis
    # (assumes the repository layout, where wav_tools provides waveReadAsFloat)
    import core_scripts.data_io.wav_tools as wav_tools
    sr, data1 = wav_tools.waveReadAsFloat('media/arctic_a0001.wav')
m_lpc = LPClite(320, 80)
# LPC analysis
lpc_coef, _, rc, gain, err, err_overlapped = m_lpc.analysis(
np.expand_dims(data1, axis=1))
# LPC synthesis
wav_re = m_lpc.synthesis(lpc_coef, err, gain)
# excitation with Gain
excitation_new = m_lpc._overlapadd(err * gain)
    # plotting utilities from the tutorials directory
    from tutorials.plot_tools import plot_API
    from tutorials.plot_tools import plot_lib
    plot_API.plot_API([wav_re[:, 0], data1,
err_overlapped[:, 0],
excitation_new[:, 0]],
plot_lib.plot_spec, 'v')
plot_API.plot_API([wav_re[:, 0] - err_overlapped[:, 0]],
plot_lib.plot_spec, 'single')
# RC to LPC
lpc_coef_tmp = m_lpc._rc2lpc(rc)
print(np.std(lpc_coef_tmp - lpc_coef))
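    # Another hypothetical sketch (not in the original file): resample a
    # synthetic F0 contour from a 5-ms frame shift to a 10-ms frame shift
    # with f0resize. Any 1-D array of F0 values would do here.
    demo_f0 = np.abs(np.sin(np.arange(200) / 20.0)) * 100.0 + 80.0
    demo_f0_new = f0resize(demo_f0, 5, 10)
    print("F0 frames before/after resizing:",
          demo_f0.shape[0], demo_f0_new.shape[0])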
| 32,385 | 31.680121 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/conf.py | #!/usr/bin/env python
"""
config.py
Configurations for data_io
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import torch
import torch.utils.data
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
# ---------------------------
# Numerical configuration
# ---------------------------
# data type for host
h_dtype = np.float32
# data type string format for numpy
h_dtype_str = '<f4'
# data type for device (GPU)
d_dtype = torch.float32
# std_floor
std_floor = 0.00000001
# ---------------------------
# File name configuration
# ---------------------------
# name of the mean/std file for input features
mean_std_i_file = 'mean_std_input.bin'
# name of the mean/std file for output features
mean_std_o_file = 'mean_std_output.bin'
# name of the the uttrerance length file
data_len_file = 'utt_length.dic'
# ---------------------------
# F0 extention and unvoiced value
# ---------------------------
# dictionary: key is F0 file extention, value is unvoiced value
f0_unvoiced_dic = {'.f0' : 0}
# ---------------------------
# Data configuration
# ---------------------------
# minimum length of data. Sequence shorter than this will be ignored
data_seq_min_length = 40
# default configuration for torch.DataLoader
default_loader_conf = {'batch_size':1, 'shuffle':False, 'num_workers':0}
| 1,383 | 21.688525 | 72 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/data_io/customize_collate_fn.py | #!/usr/bin/env python
"""
customize_collate_fn
Customized collate functions for DataLoader, based on
github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
PyTorch is BSD-style licensed, as found in the LICENSE file.
"""
from __future__ import absolute_import
import os
import sys
import torch
import re
import collections
#from torch._six import container_abcs, string_classes, int_classes
from torch._six import string_classes
"""
The primary motivation is to handle batch of data with varied length.
Default default_collate cannot handle that because of stack:
github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
Here we modify the default_collate to take into consideration of the
varied length of input sequences in a single batch.
Notice that customize_collate_fn only pads the sequences.
For batch input to the RNN layers, additional pack_padded_sequence function is
necessary. For example, this collate_fn does something similar to line 56-66,
but not line 117 in this repo:
https://gist.github.com/HarshTrivedi/f4e7293e941b17d19058f6fb90ab0fec
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
np_str_obj_array_pattern = re.compile(r'[SaUO]')
customize_collate_err_msg = (
"customize_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
def pad_sequence(batch, padding_value=0.0):
""" output_batch = pad_sequence(batch)
input
-----
batch: list of tensor, [data_1, data2, ...], and data_1 is (len, dim, ...)
output
------
output_batch: list of tensor, [data_1_padded, data_2_padded, ...]
Pad a batch of data sequences to be same length (maximum length in batch).
This function is based on
pytorch.org/docs/stable/_modules/torch/nn/utils/rnn.html#pad_sequence.
Output list of tensor can be stacked into (batchsize, len, dim,...).
See customize_collate(batch) below
"""
# get the rest of the dimensions (dim, ...)
dim_size = batch[0].size()
trailing_dims = dim_size[1:]
# get the maximum length
max_len = max([s.size(0) for s in batch])
if all(x.shape[0] == max_len for x in batch):
# if all data sequences in batch have the same length, no need to pad
return batch
else:
# else, we need to pad
out_dims = (max_len, ) + trailing_dims
output_batch = []
for i, tensor in enumerate(batch):
# check the rest of dimensions
if tensor.size()[1:] != trailing_dims:
print("Data in batch has different dimensions:")
for data in batch:
print(str(data.size()))
raise RuntimeError('Fail to create batch data')
# save padded results
out_tensor = tensor.new_full(out_dims, padding_value)
out_tensor[:tensor.size(0), ...] = tensor
output_batch.append(out_tensor)
return output_batch
def customize_collate(batch):
""" customize_collate(batch)
Collate a list of data into batch. Modified from default_collate.
"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
# this is the main part to handle varied length data in a batch
# batch = [data_tensor_1, data_tensor_2, data_tensor_3 ... ]
#
batch_new = pad_sequence(batch)
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
# allocate the memory based on maximum numel
numel = max([x.numel() for x in batch_new]) * len(batch_new)
storage = elem.storage()._new_shared(numel)
# updated according to latest collate function
# otherwise, it raises warning
# pytorch/blob/master/torch/utils/data/_utils/collate.py
out = elem.new(storage).resize_(
len(batch_new), *list(batch_new[0].size()))
#print(batch_new.shape[0], batch_new.shape[1])
return torch.stack(batch_new, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(customize_collate_err_msg.format(elem.dtype))
# this will go to loop in the last case
return customize_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
#elif isinstance(elem, int_classes):
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
#elif isinstance(elem, container_abcs.Mapping):
elif isinstance(elem, collections.abc.Mapping):
return {key: customize_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(customize_collate(samples) \
for samples in zip(*batch)))
#elif isinstance(elem, container_abcs.Sequence):
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError('each element in batch should be of equal size')
# zip([[A, B, C], [a, b, c]]) -> [[A, a], [B, b], [C, c]]
transposed = zip(*batch)
return [customize_collate(samples) for samples in transposed]
raise TypeError(customize_collate_err_msg.format(elem_type))
def pad_sequence_batch(list_batch, padding_value=0.0):
""" output_batch = pad_sequence(list_batch)
input
-----
batch: list of batch, [batch_1, batch_2, ...], and batch_1 is
(batch_size, len, dim1, dim2, ...)
output
------
output_batch: list of tensor, [batch_1_padded, batch_2_padded, ...]
Different from pad_sequence, list_batch is a list of batched tensors
"""
# each batched_tensor has shape (batch, length, dim1, ...)
# get dimensions for (dim1, ...)
dim_size = list_batch[0].size()
if len(dim_size) <= 2:
return list_batch
trailing_dims = dim_size[2:]
# get the maximum length for each batched tensor
max_len = max([s.size(1) for s in list_batch])
if all(x.shape[1] == max_len for x in list_batch):
# if all data sequences in batch have the same length, no need to pad
return list_batch
else:
output_batch = []
for i, tensor in enumerate(list_batch):
# shape (batch, max_len, dim1, dim2, ...)
out_dims = (tensor.shape[0], max_len, ) + trailing_dims
# check the rest of dimensions
if tensor.size()[2:] != trailing_dims:
print("Data in batch has different dimensions:")
raise RuntimeError('Fail to pad batched data')
# save padded results
out_tensor = tensor.new_full(out_dims, padding_value)
out_tensor[:, :tensor.size(1), ...] = tensor
output_batch.append(out_tensor)
return output_batch
def customize_collate_from_batch(batch):
""" output = customize_collate_from_batch
input
-----
batch: list of tensor, [tensor1, tensor2, ...], where
each tensor has shape (batch, length, dim1, dim2, ...)
output
------
output: tensor (batch_sum, length, dim1, dim2, ...)
Similar to customize_collate, but input is a list of batch data that have
been collated through customize_collate.
The difference is use torch.cat rather than torch.stack to merge tensors.
Also, list of data is directly concatenated
This is used in customize_dataset when merging data from multiple datasets.
It is better to separate this function from customize_collate
"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
batch_new = pad_sequence_batch(batch)
out = None
if torch.utils.data.get_worker_info() is not None:
numel = max([x.numel() for x in batch_new]) * len(batch_new)
storage = elem.storage()._new_shared(numel)
# we need to resize_ to suppress a warning
# this is based on
# pytorch/blob/master/torch/utils/data/_utils/collate.py
# [batch_1, length, dim], [batch_2, length, dim] ...
# batch_new[0][0].size() -> length, dim, ...
# [x.shape[0] for x in batch_new] -> [batch_1, batch_2, ...]
out = elem.new(storage).resize_(
sum([x.shape[0] for x in batch_new]),
*list(batch_new[0][0].size()))
# here is the difference
# concateante (batch1, length, dim1, dim2, ...) (batch2, length, ...)
# into (batch1+batch2+..., length, dim1, dim2, ...)
return torch.cat(batch_new, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(customize_collate_err_msg.format(elem.dtype))
return customize_collate_from_batch(
[torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
#elif isinstance(elem, int_classes):
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, tuple):
# concatenate two tuples
tmp = elem
for tmp_elem in batch[1:]:
tmp += tmp_elem
return tmp
#elif isinstance(elem, container_abcs.Sequence):
elif isinstance(elem, collections.abc.Sequence):
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError('each element in batch should be of equal size')
transposed = zip(*batch)
return [customize_collate_from_batch(samples) for samples in transposed]
raise TypeError(customize_collate_err_msg.format(elem_type))
if __name__ == "__main__":
print("Definition of customized collate function")
| 11,034 | 36.662116 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/op_manager/lr_scheduler.py | #!/usr/bin/env python
"""
op_manager
A simple wrapper to create lr scheduler
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import torch
import torch.optim as torch_optim
import torch.optim.lr_scheduler as torch_optim_steplr
import core_scripts.other_tools.display as nii_warn
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
class LRScheduler():
""" Wrapper over different types of learning rate Scheduler
"""
def __init__(self, optimizer, args):
# learning rate decay
self.lr_decay = args.lr_decay_factor
# lr scheduler type
# please check arg_parse.py for the number ID
self.lr_scheduler_type = args.lr_scheduler_type
        # patience for ReduceLROnPlateau
self.lr_patience = args.lr_patience
# step size for stepLR
self.lr_stepLR_size = args.lr_steplr_size
if self.lr_decay > 0:
if self.lr_scheduler_type == 1:
# StepLR
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer = optimizer, step_size = self.lr_stepLR_size,
gamma = self.lr_decay)
elif self.lr_scheduler_type == 2:
                # ExponentialLR
self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer = optimizer, gamma = self.lr_decay)
elif self.lr_scheduler_type == 3:
# Cosine
                self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
                    optimizer = optimizer, T_0 = self.lr_patience)
else:
# by default, ReduceLROnPlateau
self.lr_scheduler = torch_optim_steplr.ReduceLROnPlateau(
optimizer=optimizer, factor=self.lr_decay,
patience=self.lr_patience)
self.flag = True
else:
self.lr_scheduler = None
self.flag =False
return
def f_valid(self):
""" Whether this LR scheduler is valid
"""
return self.flag
def f_print_info(self):
""" Print information about the LR scheduler
"""
if not self.flag:
mes = ""
else:
if self.lr_scheduler_type == 1:
mes = "\n LR scheduler, StepLR [gamma %f, step %d]" % (
self.lr_decay, self.lr_stepLR_size)
elif self.lr_scheduler_type == 2:
mes = "\n LR scheduler, ExponentialLR [gamma %f]" % (
self.lr_decay)
            elif self.lr_scheduler_type == 3:
                mes = "\n  LR scheduler, CosineAnnealingWarmRestarts "
                mes += "[T_0 %d]" % (self.lr_patience)
            else:
                mes = "\n  LR scheduler, ReduceLROnPlateau "
                mes += "[decay %f, patience %d]" % (
                    self.lr_decay, self.lr_patience)
return mes
def f_last_lr(self):
""" Return the last lr
"""
if self.f_valid():
if hasattr(self.lr_scheduler, "get_last_lr"):
return self.lr_scheduler.get_last_lr()
else:
return self.lr_scheduler._last_lr
else:
return []
def f_load_state_dict(self, state):
if self.f_valid():
self.lr_scheduler.load_state_dict(state)
return
def f_state_dict(self):
if self.f_valid():
return self.lr_scheduler.state_dict()
else:
return None
def f_step(self, loss_val):
if self.f_valid():
if self.lr_scheduler_type == 1:
self.lr_scheduler.step()
elif self.lr_scheduler_type == 2:
self.lr_scheduler.step()
else:
self.lr_scheduler.step(loss_val)
return
def f_allow_early_stopping(self):
if self.f_valid():
if self.lr_scheduler_type == 1:
return True
elif self.lr_scheduler_type == 2:
return True
else:
# ReduceLROnPlateau no need to use early stopping
return False
else:
return True
if __name__ == "__main__":
print("Definition of lr_scheduler")
| 4,244 | 29.106383 | 89 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/op_manager/op_manager.py | #!/usr/bin/env python
"""
op_manager
A simple wrapper to create optimizer
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import torch
import torch.optim as torch_optim
import torch.optim.lr_scheduler as torch_optim_steplr
import core_scripts.other_tools.list_tools as nii_list_tools
import core_scripts.other_tools.display as nii_warn
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.conf as nii_op_config
import core_scripts.op_manager.op_process_monitor as nii_op_monitor
import core_scripts.op_manager.lr_scheduler as nii_lr_scheduler
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
class OptimizerWrapper():
""" Wrapper over optimizer
"""
def __init__(self, model, args):
""" Initialize an optimizer over model.parameters()
"""
# check valildity of model
if not hasattr(model, "parameters"):
nii_warn.f_print("model is not torch.nn", "error")
nii_warn.f_die("Error in creating OptimizerWrapper")
# set optimizer type
self.op_flag = args.optimizer
self.lr = args.lr
self.l2_penalty = args.l2_penalty
# grad clip norm is directly added in nn_manager
self.grad_clip_norm = args.grad_clip_norm
# create optimizer
if self.op_flag == "Adam":
if self.l2_penalty > 0:
self.optimizer = torch_optim.Adam(model.parameters(),
lr=self.lr,
weight_decay=self.l2_penalty)
else:
self.optimizer = torch_optim.Adam(model.parameters(),
lr=self.lr)
elif self.op_flag == 'AdamW':
if self.l2_penalty > 0:
self.optimizer = torch_optim.AdamW(model.parameters(),
lr=self.lr,
weight_decay=self.l2_penalty)
else:
self.optimizer = torch_optim.AdamW(model.parameters(),
lr=self.lr)
else:
nii_warn.f_print("%s not availabel" % (self.op_flag), "error")
nii_warn.f_die("Please add optimizer to op_manager")
# number of epochs
if args.active_learning_cycle_num:
# for active learning, epochs * number_of_cycle
self.epochs = args.epochs * np.abs(args.active_learning_cycle_num)
else:
self.epochs = args.epochs
self.no_best_epochs = args.no_best_epochs
# lr scheduler
self.lr_scheduler = nii_lr_scheduler.LRScheduler(self.optimizer, args)
return
def print_info(self):
""" print message of optimizer
"""
mes = "Optimizer:\n Type: {} ".format(self.op_flag)
mes += "\n Learing rate: {:2.6f}".format(self.lr)
mes += "\n Epochs: {:d}".format(self.epochs)
mes += "\n No-best-epochs: {:d}".format(self.no_best_epochs)
if self.lr_scheduler.f_valid():
mes += self.lr_scheduler.f_print_info()
if self.l2_penalty > 0:
mes += "\n With weight penalty {:f}".format(self.l2_penalty)
if self.grad_clip_norm > 0:
mes += "\n With grad clip norm {:f}".format(self.grad_clip_norm)
nii_warn.f_print_message(mes)
def get_epoch_num(self):
return self.epochs
def get_no_best_epoch_num(self):
return self.no_best_epochs
def get_lr_info(self):
if self.lr_scheduler.f_valid():
            # no way to look into the updated lr other than using _last_lr
tmp = ''
for updated_lr in self.lr_scheduler.f_last_lr():
if np.abs(self.lr - updated_lr) > 0.0000001:
tmp += "{:.2e} ".format(updated_lr)
if tmp:
tmp = " LR -> " + tmp
return tmp
else:
return None
if __name__ == "__main__":
print("Optimizer Wrapper")
| 4,193 | 33.661157 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager_GAN.py | #!/usr/bin/env python
"""
nn_manager_gan
A simple wrapper to run the training / testing process for GAN
"""
from __future__ import print_function
import time
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.display as nii_display
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.op_process_monitor as nii_monitor
import core_scripts.op_manager.op_display_tools as nii_op_display_tk
import core_scripts.nn_manager.nn_manager_tools as nii_nn_tools
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#############################################################
def f_run_one_epoch_GAN(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, monitor, \
data_loader, epoch_idx,
optimizer_G = None, optimizer_D = None, \
target_norm_method = None):
"""
f_run_one_epoch_GAN:
    run one epoch over the dataset (for training or validation sets)
Args:
args: from argpase
pt_model_G: pytorch model (torch.nn.Module) generator
pt_model_D: pytorch model (torch.nn.Module) discriminator
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
monitor: defined in op_procfess_monitor.py
data_loader: pytorch DataLoader.
epoch_idx: int, index of the current epoch
optimizer_G: torch optimizer or None, for generator
optimizer_D: torch optimizer or None, for discriminator
                   if None, back-propagation will be skipped
                   (for the development set)
target_norm_method: method to normalize target data
(by default, use pt_model.normalize_target)
"""
# timer
start_time = time.time()
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
#############
# prepare
#############
        # reset gradients before the forward pass
if optimizer_G is not None:
optimizer_G.zero_grad()
if optimizer_D is not None:
optimizer_D.zero_grad()
# Put data to devices
# to device (we assume noise will be generated by the model itself)
# here we only provide external condition
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
else:
nii_display.f_die("target data is required")
############################
# 1. Generate the sample
############################
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
if args.model_forward_with_file_name:
data_gen = pt_model_G(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model_G(data_in, data_tar_tm)
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded")
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model_G(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model_G(data_in)
############################
# 2. update the discriminator
############################
pt_model_D.zero_grad()
if optimizer_D is not None:
optimizer_D.zero_grad()
# compute discriminator loss
if hasattr(pt_model_D, 'loss_for_D'):
errD = pt_model_D.loss_for_D(data_tar, data_gen.detach(), data_in)
else:
# for compatibility
# get the discriminator's outputs for real and fake data
# data_gen.detach() is required
# https://github.com/pytorch/examples/issues/116
# https://stackoverflow.com/questions/46774641/
d_out_fake = pt_model_D(data_gen.detach(), data_in)
d_out_real = pt_model_D(data_tar, data_in)
errD_real = loss_wrapper.compute_gan_D_real(d_out_real)
errD_fake = loss_wrapper.compute_gan_D_fake(d_out_fake)
errD = errD_real + errD_fake
# update discriminator weight
if optimizer_D is not None:
errD.backward()
optimizer_D.step()
############################
# 3. update generator
############################
pt_model_G.zero_grad()
if optimizer_G is not None:
optimizer_G.zero_grad()
# compute the loss for generator
if hasattr(pt_model_D, 'loss_for_G'):
errG = pt_model_D.loss_for_G(data_tar, data_gen, data_in)
if hasattr(pt_model_G, 'loss_aux'):
errG += pt_model_G.loss_aux(data_tar, data_gen, data_in)
else:
# get the discriminator's outputs again
d_out_fake_for_G = pt_model_D(data_gen, data_in)
d_out_real_for_G = pt_model_D(data_tar, data_in)
# for compatibility
errG_gan = loss_wrapper.compute_gan_G(d_out_fake_for_G)
# if defined, calculate the auxiliary loss
if hasattr(loss_wrapper, "compute_aux"):
errG_aux = loss_wrapper.compute_aux(data_gen, data_tar)
else:
errG_aux = torch.zeros_like(errG_gan)
# if defined, calculate feat-matching loss
if hasattr(loss_wrapper, "compute_feat_match"):
errG_feat = loss_wrapper.compute_feat_match(
d_out_real_for_G, d_out_fake_for_G)
else:
errG_feat = torch.zeros_like(errG_gan)
# sum loss for generator
errG = errG_gan + errG_aux + errG_feat
if optimizer_G is not None:
errG.backward()
optimizer_G.step()
# construct the loss for logging and early stopping
# only use errG_aux for early-stopping
#loss_computed = [
# [errG_aux, errD_real, errD_fake, errG_gan, errG_feat],
# [True, False, False, False, False]]
loss_computed = [[errG, errD], [True, True]]
# to handle cases where there are multiple loss functions
_, loss_vals, loss_flags = nii_nn_tools.f_process_loss(loss_computed)
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch; thus, use loss_value
# rather than loss_value / batchsize
monitor.log_loss(loss_vals, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print info for one sentence
if args.verbose == 1:
# here we use args.batch_size because len(data_info)
# may be < args.batch_size.
monitor.print_error_for_batch(
data_idx * args.batch_size + idx,
idx_orig.numpy()[idx],
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
# Save intermediate model for every n mini-batches (optional).
# Note that if we re-start training with this intermediate model,
# the data will start from the 1st sample, not the one where we stopped
if args.save_model_every_n_minibatches > 0 \
and (data_idx+1) % args.save_model_every_n_minibatches == 0 \
and optimizer_G is not None and data_idx > 0:
cp_names = nii_nn_manage_conf.CheckPointKey()
for pt_model, optimizer, model_tag in \
zip([pt_model_G, pt_model_D], [optimizer_G, optimizer_D],
["_G", "_D"]):
tmp_model_name = nii_nn_tools.f_save_epoch_name(
args, epoch_idx,
'_{:05d}_{:s}'.format(data_idx+1, model_tag))
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.optimizer : optimizer.state_dict(),
}
torch.save(tmp_dic, tmp_model_name)
# If debug mode is used, only run a specified number of mini-batches
if args.debug_batch_num > 0 and data_idx >= (args.debug_batch_num - 1):
nii_display.f_print("Debug mode is on. This epoch is finished")
break
# loop done
return
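# A minimal sketch of the discriminator interface assumed by the hasattr()
# checks above. The class below is only an illustration (the input size and
# the binary cross-entropy criterion are hypothetical choices, not part of
# this toolkit): any torch.nn.Module that defines loss_for_D(real, fake, cond)
# and loss_for_G(real, fake, cond) will be used directly by
# f_run_one_epoch_GAN instead of the compatibility branch.
class _ExampleDiscriminator(nn.Module):
    def __init__(self, in_dim=80):
        super(_ExampleDiscriminator, self).__init__()
        self.m_net = nn.Sequential(
            nn.Linear(in_dim, 128), nn.LeakyReLU(), nn.Linear(128, 1))
    def forward(self, data, cond):
        # cond (the external condition) is unused in this toy example
        return self.m_net(data)
    def loss_for_D(self, real, fake, cond):
        # vanilla GAN discriminator loss; the caller detaches fake
        d_real = self.forward(real, cond)
        d_fake = self.forward(fake, cond)
        return F.binary_cross_entropy_with_logits(
            d_real, torch.ones_like(d_real)) \
            + F.binary_cross_entropy_with_logits(
                d_fake, torch.zeros_like(d_fake))
    def loss_for_G(self, real, fake, cond):
        # vanilla GAN generator loss
        d_fake = self.forward(fake, cond)
        return F.binary_cross_entropy_with_logits(
            d_fake, torch.ones_like(d_fake))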
def f_run_one_epoch_WGAN(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, monitor, \
data_loader, epoch_idx,
optimizer_G = None, optimizer_D = None, \
target_norm_method = None):
"""
f_run_one_epoch_WGAN:
similar to f_run_one_epoch_GAN, but for WGAN
"""
# timer
start_time = time.time()
# This should be moved to model definition
# number of critic (default 5)
num_critic = 5
# clip value
wgan_clamp = 0.01
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
# send data to device
if optimizer_G is not None:
optimizer_G.zero_grad()
if optimizer_D is not None:
optimizer_D.zero_grad()
# prepare data
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
# there is no way to normalize the data inside loss
# thus, do normalization here
if target_norm_method is None:
normed_target = pt_model_G.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
else:
nii_display.f_die("target data is required")
# to device (we assume noise will be generated by the model itself)
# here we only provide external condition
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
############################
# Update Discriminator
############################
# train with real
pt_model_D.zero_grad()
d_out_real = pt_model_D(data_tar)
errD_real = loss_wrapper.compute_gan_D_real(d_out_real)
if optimizer_D is not None:
errD_real.backward()
d_out_real_mean = d_out_real.mean()
# train with fake
# generate sample
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
if args.model_forward_with_file_name:
data_gen = pt_model_G(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model_G(data_in, data_tar_tm)
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded")
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model_G(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model_G(data_in)
# data_gen.detach() is required
# https://github.com/pytorch/examples/issues/116
d_out_fake = pt_model_D(data_gen.detach())
errD_fake = loss_wrapper.compute_gan_D_fake(d_out_fake)
if optimizer_D is not None:
errD_fake.backward()
d_out_fake_mean = d_out_fake.mean()
errD = errD_real + errD_fake
if optimizer_D is not None:
optimizer_D.step()
# clip weights of discriminator
for p in pt_model_D.parameters():
p.data.clamp_(-wgan_clamp, wgan_clamp)
############################
# Update Generator
############################
pt_model_G.zero_grad()
d_out_fake_for_G = pt_model_D(data_gen)
errG_gan = loss_wrapper.compute_gan_G(d_out_fake_for_G)
errG_aux = loss_wrapper.compute_aux(data_gen, data_tar)
errG = errG_gan + errG_aux
# only update the generator after num_critic discriminator updates
if data_idx % num_critic == 0 and optimizer_G is not None:
errG.backward()
optimizer_G.step()
d_out_fake_for_G_mean = d_out_fake_for_G.mean()
# construct the loss for logging and early stopping
# only use errG_aux for early-stopping
loss_computed = [[errG_aux, errG_gan, errD_real, errD_fake,
d_out_real_mean, d_out_fake_mean,
d_out_fake_for_G_mean],
[True, False, False, False, False, False, False]]
# to handle cases where there are multiple loss functions
loss, loss_vals, loss_flags = nii_nn_tools.f_process_loss(loss_computed)
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch; thus, use loss_value
# rather than loss_value / batchsize
monitor.log_loss(loss_vals, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print info for one sentence
if args.verbose == 1:
monitor.print_error_for_batch(data_idx*batchsize + idx,\
idx_orig.numpy()[idx], \
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
# loop done
return
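# A worked example of the update schedule above (hypothetical counts): with
# num_critic = 5, the critic is updated and its weights clamped to
# [-0.01, 0.01] on every mini-batch, while the generator is stepped only on
# mini-batches 0, 5, 10, ...; over 100 mini-batches this gives 100 critic
# updates and 20 generator updates, following the usual WGAN recipe.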
def f_train_wrapper_GAN(
args, pt_model_G, pt_model_D, loss_wrapper, device, \
optimizer_G_wrapper, optimizer_D_wrapper, \
train_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint_G = None, checkpoint_D = None):
"""
f_train_wrapper_GAN(
args, pt_model_G, pt_model_D, loss_wrapper, device,
optimizer_G_wrapper, optimizer_D_wrapper,
train_dataset_wrapper, val_dataset_wrapper = None,
checkpoint_G = None, checkpoint_D = None):
A wrapper to run the training process
Args:
args: argument information given by argpase
pt_model_G: generator, pytorch model (torch.nn.Module)
pt_model_D: discriminator, pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss functions
loss_wrapper.compute_gan_D_real(discriminator_output)
loss_wrapper.compute_gan_D_fake(discriminator_output)
loss_wrapper.compute_gan_G(discriminator_output)
loss_wrapper.compute_aux(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_G_wrapper:
an optimizer wrapper for the generator (defined in op_manager.py)
optimizer_D_wrapper:
an optimizer wrapper for the discriminator (defined in op_manager.py)
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
checkpoint_G:
a checkpoint that stores everything needed to resume training
checkpoint_D:
a checkpoint that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
# get the optimizers for the generator and the discriminator
optimizer_G_wrapper.print_info()
optimizer_D_wrapper.print_info()
optimizer_G = optimizer_G_wrapper.optimizer
optimizer_D = optimizer_D_wrapper.optimizer
epoch_num = optimizer_G_wrapper.get_epoch_num()
no_best_epoch_num = optimizer_G_wrapper.get_no_best_epoch_num()
# get data loader for training set
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(epoch_num, val_seq_num)
else:
monitor_val = None
# training log information
train_log = ''
model_tags = ["_G", "_D"]
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_die("data_parallel not implemented for GAN")
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
# put models to device
pt_model_G.to(device, dtype=nii_dconf.d_dtype)
pt_model_D.to(device, dtype=nii_dconf.d_dtype)
# print the network
# check the definition of the GAN generator. It must be a valid model definition
nii_display.f_print("Setup generator")
nii_nn_tools.f_model_show(pt_model_G, model_type='GAN')
# check the definition of the GAN discriminator. Its definition is less constrained
nii_display.f_print("Setup discriminator")
nii_nn_tools.f_model_show(pt_model_D,
do_model_def_check=False, model_type='GAN')
# check the loss function
if loss_wrapper is not None:
mes = "Separate Loss() in model.py is not recommended.\n"
mes += "It is better to define loss in generator and discrminator.\n"
mes += " ModelGenerator.loss_aux: for auxialliary loss of generator\n"
mes += " ModelDiscriminator.loss_for_G: loss for generator\n"
mes += " ModelDiscriminator.loss_for_D: loss for discrminator\n"
mes += "Each loss function should have a signature like: \n"
mes += " loss = loss_func(natural_data, generated_data, condition)"
nii_display.f_print(mes)
nii_nn_tools.f_loss_show(loss_wrapper, model_type='GAN')
###############################
## Resume training if necessary
###############################
# resume training or initialize the model if necessary
cp_names = nii_nn_manage_conf.CheckPointKey()
if checkpoint_G is not None or checkpoint_D is not None:
for checkpoint, optimizer, pt_model, model_name in \
zip([checkpoint_G, checkpoint_D], [optimizer_G, optimizer_D],
[pt_model_G, pt_model_D], ["Generator", "Discriminator"]):
nii_display.f_print("For %s" % (model_name))
if type(checkpoint) is dict:
# checkpoint
# load model parameter and optimizer state
if cp_names.state_dict in checkpoint:
# wrap the state_dic in f_state_dict_wrapper
# in case the model is saved when DataParallel is on
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint[cp_names.state_dict],
flag_multi_device))
# load optimizer state
if cp_names.optimizer in checkpoint:
optimizer.load_state_dict(checkpoint[cp_names.optimizer])
# optionally, load training history
if not args.ignore_training_history_in_trained_model:
#nii_display.f_print("Load ")
if cp_names.trnlog in checkpoint:
monitor_trn.load_state_dic(
checkpoint[cp_names.trnlog])
if cp_names.vallog in checkpoint and monitor_val:
monitor_val.load_state_dic(
checkpoint[cp_names.vallog])
if cp_names.info in checkpoint:
train_log = checkpoint[cp_names.info]
nii_display.f_print("Load check point, resume training")
else:
nii_display.f_print("Load pretrained model and optimizer")
elif checkpoint is not None:
# only model status
#pt_model.load_state_dict(checkpoint)
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint, flag_multi_device))
nii_display.f_print("Load pretrained model")
else:
nii_display.f_print("No pretrained model")
# done for resume training
######################
### User defined setup
######################
# Not implemented yet
######################
### Start training
######################
# other variables
flag_early_stopped = False
start_epoch = monitor_trn.get_epoch()
epoch_num = monitor_trn.get_max_epoch()
# select one wrapper, based on the flag in loss definition
if hasattr(loss_wrapper, "flag_wgan") and loss_wrapper.flag_wgan:
f_wrapper_gan_one_epoch = f_run_one_epoch_WGAN
else:
f_wrapper_gan_one_epoch = f_run_one_epoch_GAN
# print
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# loop over multiple epochs
for epoch_idx in range(start_epoch, epoch_num):
# training one epoch
pt_model_D.train()
pt_model_G.train()
f_wrapper_gan_one_epoch(
args, pt_model_G, pt_model_D,
loss_wrapper, device, \
monitor_trn, train_data_loader, \
epoch_idx, optimizer_G, optimizer_D,
normtarget_f)
time_trn = monitor_trn.get_time(epoch_idx)
loss_trn = monitor_trn.get_loss(epoch_idx)
# if necessary, do validation
if val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model_G.eval()
pt_model_D.eval()
with torch.no_grad():
f_wrapper_gan_one_epoch(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, \
monitor_val, val_data_loader, \
epoch_idx, None, None, normtarget_f)
time_val = monitor_val.get_time(epoch_idx)
loss_dev = monitor_val.get_loss(epoch_idx)
else:
time_val, loss_dev = 0, np.zeros_like(loss_trn)
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
train_log += nii_op_display_tk.print_train_info(
epoch_idx, time_trn, loss_trn, time_val, loss_dev,
flag_new_best, optimizer_G_wrapper.get_lr_info())
# save the best model
if flag_new_best or args.force_save_lite_trained_network_per_epoch:
for pt_model, tmp_tag in zip([pt_model_G, pt_model_D], model_tags):
tmp_best_name = nii_nn_tools.f_save_trained_name(args, tmp_tag)
torch.save(pt_model.state_dict(), tmp_best_name)
# save intermediate model if necessary
if not args.not_save_each_epoch:
# save the discriminator and the generator
for pt_model, optimizer, model_tag in \
zip([pt_model_G, pt_model_D], [optimizer_G, optimizer_D],
model_tags):
tmp_model_name = nii_nn_tools.f_save_epoch_name(
args, epoch_idx, model_tag)
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
# early stopping
if monitor_val is not None and \
monitor_val.should_early_stop(no_best_epoch_num):
flag_early_stopped = True
break
# loop done
nii_op_display_tk.print_log_tail()
if flag_early_stopped:
nii_display.f_print("Training finished by early stopping")
else:
nii_display.f_print("Training finished")
nii_display.f_print("Model is saved to", end = '')
for model_tag in model_tags:
nii_display.f_print("{}".format(
nii_nn_tools.f_save_trained_name(args, model_tag)))
return
if __name__ == "__main__":
print("nn_manager for GAN")
| 27,182 | 39.211538 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager_tools.py | #!/usr/bin/env python
"""
nn_manager
utilities used by nn_manager
"""
from __future__ import print_function
import sys
from collections import OrderedDict
import numpy as np
import torch
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.other_tools.display as nii_display
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#############################################################
def f_state_dict_wrapper(state_dict, data_parallel=False):
""" a wrapper to take care of state_dict when using DataParallism
f_model_load_wrapper(state_dict, data_parallel):
state_dict: pytorch state_dict
data_parallel: whether DataParallel is used
https://discuss.pytorch.org/t/solved-keyerror-unexpected-
key-module-encoder-embedding-weight-in-state-dict/1686/3
"""
if data_parallel is True:
# if data_parallel is used
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if not k.startswith('module'):
# if key is not starting with module, add it
name = 'module.' + k
else:
name = k
new_state_dict[name] = v
return new_state_dict
else:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if not k.startswith('module'):
name = k
else:
# remove module.
name = k[7:]
new_state_dict[name] = v
return new_state_dict
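# A minimal sketch of the key handling above (the key name "encoder.weight"
# is hypothetical and used only for illustration):
def _example_state_dict_wrapper():
    sd = OrderedDict([("encoder.weight", torch.zeros(2))])
    # model will be wrapped by DataParallel: prepend "module."
    sd_dp = f_state_dict_wrapper(sd, data_parallel=True)
    # keys are now ["module.encoder.weight"]
    # model is a plain module: strip "module." again
    sd_plain = f_state_dict_wrapper(sd_dp, data_parallel=False)
    # keys are back to ["encoder.weight"]
    return sd_dp, sd_plain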
def f_process_loss(loss):
""" loss, loss_value = f_process_loss(loss):
Input:
loss: returned by loss_wrapper.compute
It can be a torch.tensor or a list of torch.tensor
When it is a list, it should look like:
[[loss_1, loss_2, loss_3],
[true/false, true/false, true.false]]
where true / false tells whether the loss should be taken into
consideration for early-stopping
Output:
loss: a torch.tensor, the summed loss (used for backward())
loss_values: a list of floats, one value per loss term (for logging)
loss_flags: a list of bools, whether each term is used for early stopping
"""
if type(loss) is list:
loss_sum = loss[0][0]
loss_list = [loss[0][0].item()]
if len(loss[0]) > 1:
for loss_tmp in loss[0][1:]:
loss_sum += loss_tmp
loss_list.append(loss_tmp.item())
return loss_sum, loss_list, loss[1]
else:
return loss, [loss.item()], [True]
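# A minimal sketch of the two accepted input formats (hypothetical values):
def _example_process_loss():
    # case 1: a single tensor
    loss, vals, flags = f_process_loss(torch.tensor(0.5))
    # loss is the tensor itself, vals == [0.5], flags == [True]
    # case 2: multiple losses with early-stopping flags
    loss_a, loss_b = torch.tensor(0.2), torch.tensor(0.3)
    loss, vals, flags = f_process_loss([[loss_a, loss_b], [True, False]])
    # loss == loss_a + loss_b (used for backward()), vals == [0.2, 0.3],
    # and only the first term will be used for early stopping
    return loss, vals, flags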
def f_load_checkpoint(checkpoint, args, flag_multi_device, pt_model, optimizer,
monitor_trn, monitor_val, lr_scheduler):
""" f_load_checkpoint(checkpoint, args, pt_model, optimizer,
monitor_trn, monitor_val, lr_scheduler)
Load checkpoint.
Input:
checkpoint: check point saved by the script. Either a dict or a pt model
args: command line arguments when running the script
flag_multi_device: bool, does this code uses multiple GPUs?
Input (which will be modified by the function)
pt_model: Pytorch model, this will load the model saved in checkpoint
optimizer: optimizer, this will load the optimizer saved in checkpoint
monitor_trn: log of loss on training set
monitor_val: log of loss on validation set
lr_scheduler: learning-rate scheduler
Output:
train_log: str, text log of training loss
"""
#
train_log = ''
if checkpoint is None:
# no checkpoint
return train_log
# checkpoint exist
cp_names = nii_nn_manage_conf.CheckPointKey()
if args.allow_mismatched_pretrained_model:
if type(checkpoint) is dict:
# if it is a epoch*.pt, ignore training histories
# only load the model parameter
nii_display.f_print("allow-mismatched-pretrained-model is on")
nii_display.f_print("ignore training history in pre-trained model")
pretrained_dict = f_state_dict_wrapper(
checkpoint[cp_names.state_dict], flag_multi_device)
else:
# it is only a dictionary of trained parameters
pretrained_dict = f_state_dict_wrapper(
checkpoint, flag_multi_device)
# target model dict
model_dict = pt_model.state_dict()
# methods similar to f_load_pretrained_model_partially
# 1. filter out mismatched keys
pre_dict_tmp = {
k: v for k, v in pretrained_dict.items() \
if k in model_dict \
and model_dict[k].numel() == pretrained_dict[k].numel()}
mismatch_keys = [k for k in model_dict.keys() if k not in pre_dict_tmp]
if mismatch_keys:
print("Partially load model, ignoring buffers: {:s}".format(
' '.join(mismatch_keys)))
# 2. overwrite entries in the existing state dict
model_dict.update(pre_dict_tmp)
# 3. load the new state dict
pt_model.load_state_dict(model_dict)
else:
# the usual case
# only model status
pt_model.load_state_dict(pretrained_dict)
nii_display.f_print("Load pretrained model")
else:
if type(checkpoint) is dict:
# checkpoint is a dict (trained model + optimizer + other logs)
# load model parameter and optimizer state
if cp_names.state_dict in checkpoint:
# wrap the state_dic in f_state_dict_wrapper
# in case the model is saved when DataParallel is on
pt_model.load_state_dict(
f_state_dict_wrapper(checkpoint[cp_names.state_dict],
flag_multi_device))
# load optimizer state
if cp_names.optimizer in checkpoint and \
not args.ignore_optimizer_statistics_in_trained_model:
optimizer.load_state_dict(checkpoint[cp_names.optimizer])
# optionally, load training history
if not args.ignore_training_history_in_trained_model:
#nii_display.f_print("Load ")
if cp_names.trnlog in checkpoint:
monitor_trn.load_state_dic(checkpoint[cp_names.trnlog])
if cp_names.vallog in checkpoint and monitor_val:
monitor_val.load_state_dic(checkpoint[cp_names.vallog])
if cp_names.info in checkpoint:
train_log = checkpoint[cp_names.info]
if cp_names.lr_scheduler in checkpoint and \
checkpoint[cp_names.lr_scheduler] and lr_scheduler.f_valid():
lr_scheduler.f_load_state_dict(
checkpoint[cp_names.lr_scheduler])
nii_display.f_print("Load check point, resume training")
else:
nii_display.f_print("Load pretrained model and optimizer")
else:
# the usual case
# only model status
pt_model.load_state_dict(
f_state_dict_wrapper(checkpoint, flag_multi_device))
nii_display.f_print("Load pretrained model")
return train_log
def f_load_checkpoint_for_inference(checkpoint, pt_model):
""" f_load_checkpoint_for_inference(checkpoint, pt_model)
Load checkpoint for model inference
No matter what is inside the checkpoint, only load the model parameters
"""
cp_names = nii_nn_manage_conf.CheckPointKey()
if type(checkpoint) is dict and cp_names.state_dict in checkpoint:
pt_model.load_state_dict(checkpoint[cp_names.state_dict])
else:
pt_model.load_state_dict(checkpoint)
return
def f_load_pretrained_model_partially(model, model_paths, model_name_prefix):
""" f_load_pretrained_model_partially(model, model_paths, model_name_prefix)
Initialize part of the model with pre-trained models.
This function can be used directly. It is also called by nn_manager.py
if model.g_pretrained_model_path and model.g_pretrained_model_prefix are
defined.
For reference:
https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3
Input:
-----
model: torch model
model_paths: list of str, list of path to pre-trained models (.pt files)
model_prefix: list of str, list of model name prefix used by model
Output:
------
None
For example,
case1: A module in a pretrained model may be called model.***
This module will be model.m_part1.*** in the new model.
Then the prefix is "m_part1."
new_model.m_part1.*** <- pre_trained_model.***
case2: A module in a pretrained model may be called model.***
This module will still be model.*** in the new model.
Then the prefix is ""
new_model.*** <- pre_trained_model.***
Call f(model, ['./asr.pt', './tts.pt'], ['asr.', 'tts.']), then
model.asr <- load_dict(asr.pt)
model.tts <- load_dict(tts.pt)
"""
cp_names = nii_nn_manage_conf.CheckPointKey()
# change string to list
if type(model_paths) is str:
model_path_tmp = [model_paths]
else:
model_path_tmp = model_paths
if type(model_name_prefix) is str:
model_prefix_tmp = [model_name_prefix]
else:
model_prefix_tmp = model_name_prefix
# get the dictionary format of new model
model_dict = model.state_dict()
# for each pre-trained model
for model_path, prefix in zip(model_path_tmp, model_prefix_tmp):
if prefix == '':
pass
elif prefix[-1] != '.':
# make sure the prefix ends with '.', i.e., 'm_part1.' rather than 'm_part1'
prefix += '.'
pretrained_dict = torch.load(model_path)
# if this is a epoch***.pt, load only the network weight
if cp_names.state_dict in pretrained_dict:
pretrained_dict = pretrained_dict[cp_names.state_dict]
# 1. filter out unnecessary keys
pretrained_dict = {prefix + k: v \
for k, v in pretrained_dict.items() \
if prefix + k in model_dict}
print("Load model {:s} as {:s} ({:d} parameter buffers, ".format(
model_path, prefix, len(pretrained_dict.keys())), end=' ')
print("{:d} parameters)".format(
sum([pretrained_dict[x].numel() for x in pretrained_dict.keys()])))
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
return
def f_save_epoch_name(args, epoch_idx, suffix='', prefix=''):
""" str = f_save_epoch_name(args, epoch_idx)
Return the name of the model file saved during training
Args:
args: argument object by arg_parse, we will use
args.save_epoch_name, args.save_model_dir, args.save_model_ext
epoch_idx:, int, epoch index
suffix: a suffix to the name (default '')
Return:
str: name of the epoch state file, e.g., epoch_001.pt
"""
tmp_name = args.save_epoch_name + prefix
tmp_name = "{}_{:03d}".format(tmp_name, epoch_idx)
tmp_name = tmp_name + suffix
return nii_str_tk.f_realpath(args.save_model_dir, tmp_name, \
args.save_model_ext)
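# A minimal sketch (hypothetical argument values): with
# args.save_epoch_name = "epoch", args.save_model_dir = "./exp" and
# args.save_model_ext = ".pt", a call such as
# f_save_epoch_name(args, 3, suffix="_G") would return a path like
# "./exp/epoch_003_G.pt".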
def f_save_trained_name(args, suffix=''):
""" str = f_save_trained_name(args)
Return the name of the best trained model file
Args:
args: argument object by arg_parse
args.save_trained_name, args.save_model_dir, args.save_model_ext
suffix: a suffix added to the name (default '')
Return:
str: name of trained network file, e.g., trained_network.pt
"""
return nii_str_tk.f_realpath(
args.save_model_dir, args.save_trained_name + suffix,
args.save_model_ext)
def f_model_check(pt_model, model_type=None):
""" f_model_check(pt_model)
Check whether the model contains all the necessary keywords
Args:
----
pt_model: a Pytorch model
model_type: str or None, a flag indicating the type of network
Return:
-------
"""
nii_display.f_print("Model check:")
if model_type in nii_nn_manage_conf.nn_model_keywords_bags:
keywords_bag = nii_nn_manage_conf.nn_model_keywords_bags[model_type]
else:
keywords_bag = nii_nn_manage_conf.nn_model_keywords_default
for tmpkey in keywords_bag.keys():
flag_mandatory, mes = keywords_bag[tmpkey]
# mandatory keywords
if flag_mandatory:
if not hasattr(pt_model, tmpkey):
nii_display.f_print("Please implement %s (%s)" % (tmpkey, mes))
nii_display.f_die("[Error]: found no %s in Model" % (tmpkey))
else:
print("[OK]: %s found" % (tmpkey))
else:
if not hasattr(pt_model, tmpkey):
print("[OK]: %s is ignored, %s" % (tmpkey, mes))
else:
print("[OK]: use %s, %s" % (tmpkey, mes))
# done
nii_display.f_print("Model check done\n")
return
def f_model_show(pt_model, do_model_def_check=True, model_type=None):
""" f_model_show(pt_model, do_model_check=True)
Print the information of the model
Args:
pt_model, a Pytorch model
do_model_def_check, bool, whether check model definition (default True)
model_type: str or None (default None), what type of network
Return:
None
"""
if do_model_def_check:
f_model_check(pt_model, model_type)
nii_display.f_print("Model infor:")
print(pt_model)
num = sum(p.numel() for p in pt_model.parameters() if p.requires_grad)
nii_display.f_print("Parameter number: {:d}\n".format(num), "normal")
return
def f_set_grad_to_none(pt_model):
""" f_set_grad_to_one(pt_model)
Set the grad of trainable weights to None.
Even if grad value is 0, the weight may change due to l1 norm, moment,
or so on. It is better to explicitly set the grad of the parameter to None
https://discuss.pytorch.org/t/the-grad-is-zero-the-value-change/22765/2
"""
for p in pt_model.parameters():
if p.requires_grad:
p.requires_grad = False
p.grad = None
return
def f_loss_check(loss_module, model_type=None):
""" f_loss_check(pt_model)
Check whether the loss module contains all the necessary keywords
Args:
----
loss_module, a class
model_type, a str or None
Return:
-------
"""
nii_display.f_print("Loss check")
if model_type in nii_nn_manage_conf.loss_method_keywords_bags:
keywords_bag = nii_nn_manage_conf.loss_method_keywords_bags[model_type]
else:
keywords_bag = nii_nn_manage_conf.loss_method_keywords_default
for tmpkey in keywords_bag.keys():
flag_mandatory, mes = keywords_bag[tmpkey]
# mandatory keywords
if flag_mandatory:
if not hasattr(loss_module, tmpkey):
nii_display.f_print("Please implement %s (%s)" % (tmpkey, mes))
nii_display.f_die("[Error]: found no %s in Loss" % (tmpkey))
else:
# no need to print other information here
pass #print("[OK]: %s found" % (tmpkey))
else:
if not hasattr(loss_module, tmpkey):
# no need to print other information here
pass #print("[OK]: %s is ignored, %s" % (tmpkey, mes))
else:
print("[OK]: use %s, %s" % (tmpkey, mes))
# done
nii_display.f_print("Loss check done")
return
def f_loss_show(loss_module, do_loss_def_check=True, model_type=None):
""" f_model_show(pt_model, do_model_check=True)
Print the informaiton of the model
Args:
pt_model, a Pytorch model
do_model_def_check, bool, whether check model definition (default True)
model_type: str or None (default None), what type of network
Return:
None
"""
# no need to print other information here
# because loss is usually not a torch.Module
#nii_display.f_print("Loss infor:")
if do_loss_def_check:
f_loss_check(loss_module, model_type)
#print(loss_module)
return
########################
# data buffer operations
########################
def f_split_data(data_in, data_tar, max_length, overlap):
""" in_list, tar_list = f_split_data(data_in, data_tar, length, overlap)
Args:
data_in: tensor, (batch, length, dim)
data_tar: tensor, (batch, length, dim)
length: int, max lengnth of each trunk
overlap: int, trunc will have this number of overlap
Return:
data_in_list: list of tensors
data_tar_list: list of tensors
"""
if not isinstance(data_in, torch.Tensor):
print("Not implemented for a list of data")
sys.exit(1)
if max_length <= 0:
print("Not implemented for a negative trunc length")
sys.exit(1)
if overlap > (max_length - 1):
overlap = max_length - 1
tmp_trunc_len = max_length - overlap
trunc_num = data_in.shape[1] // tmp_trunc_len
if trunc_num > 0:
# keep the leftover tail as one more chunk only if it is longer than the overlap
if data_in.shape[1] % tmp_trunc_len > overlap:
trunc_num += 1
else:
# however, if the input is shorter than one chunk, do not segment it
if data_in.shape[1] % tmp_trunc_len > 0:
trunc_num += 1
data_in_list = []
data_tar_list = []
for trunc_idx in range(trunc_num):
start_idx = trunc_idx * tmp_trunc_len
end_idx = start_idx + max_length
data_in_list.append(data_in[:, start_idx:end_idx])
if isinstance(data_tar, torch.Tensor):
data_tar_list.append(data_tar[:, start_idx:end_idx])
else:
data_tar_list.append([])
return data_in_list, data_tar_list, overlap
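# A minimal sketch of the chunking above (hypothetical sizes): a sequence of
# 10 frames with max_length=4 and overlap=1 gives a hop of 3 frames, so the
# chunks cover frames [0:4], [3:7] and [6:10].
def _example_split_data():
    data_in = torch.zeros([1, 10, 2])
    in_list, tar_list, used_overlap = f_split_data(data_in, [], 4, 1)
    # len(in_list) == 3 and each chunk has shape (1, 4, 2)
    return in_list, tar_list, used_overlap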
def f_overlap_data(data_list, overlap_length):
""" data_gen = f_overlap_data(data_list, overlap_length)
Input:
data_list: list of tensors, in (batch, length, dim) or (batch, length)
overlap_length: int, overlap_length
Output:
data_gen: tensor, (batch, length, dim)
"""
batch = data_list[0].shape[0]
data_device = data_list[0].device
data_dtype = data_list[0].dtype
if len(data_list[0].shape) == 2:
dim = 1
else:
dim = data_list[0].shape[2]
total_length = sum([x.shape[1] for x in data_list])
data_gen = torch.zeros([batch, total_length, dim], dtype=data_dtype,
device = data_device)
prev_end = 0
for idx, data_trunc in enumerate(data_list):
tmp_len = data_trunc.shape[1]
if len(data_trunc.shape) == 2:
data_tmp = torch.unsqueeze(data_trunc, -1)
else:
data_tmp = data_trunc
if idx == 0:
data_gen[:, 0:tmp_len] = data_tmp
prev_end = tmp_len
else:
win_len = min([prev_end, overlap_length, tmp_len])
win_cof = torch.arange(0, win_len,
dtype=data_dtype, device=data_device)/win_len
win_cof = win_cof.unsqueeze(0).unsqueeze(-1)
data_gen[:, prev_end-win_len:prev_end] *= 1.0 - win_cof
data_tmp[:, :win_len] *= win_cof
data_gen[:, prev_end-win_len:prev_end-win_len+tmp_len] += data_tmp
prev_end = prev_end-win_len+tmp_len
return data_gen[:, 0:prev_end]
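# A minimal sketch of the overlap-and-add above (hypothetical sizes): merging
# the chunks produced by f_split_data recovers a tensor of the original
# length, with a linear cross-fade inside every overlapped region.
def _example_overlap_data():
    data = torch.arange(20, dtype=torch.float32).reshape(1, 10, 2)
    chunks, _, used_overlap = f_split_data(data, [], 4, 1)
    merged = f_overlap_data(chunks, used_overlap)
    # merged.shape == torch.Size([1, 10, 2])
    return merged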
##############
#
##############
def data2device(data_in, device, data_type):
if isinstance(data_in, torch.Tensor):
data_ = data_in.to(device, dtype = data_type)
elif isinstance(data_in, list) and data_in:
data_ = [data2device(x, device, data_type) for x in data_in]
else:
data_ = None
if data_ is None:
nii_display.f_die("[Error]: fail to cast data to device")
return data_
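# A minimal sketch (hypothetical tensors): nested lists are moved recursively,
# e.g. data2device([x, [y, z]], torch.device("cpu"), torch.float32) returns
# [x2, [y2, z2]] with every tensor cast to float32 on the requested device;
# passing an empty list (or any unsupported type) triggers f_die.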
if __name__ == "__main__":
print("nn_manager_tools")
| 20,331 | 33.815068 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager.py | #!/usr/bin/env python
"""
nn_manager
A simple wrapper to run the training / testing process
"""
from __future__ import print_function
import sys
import time
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import core_scripts.data_io.conf as nii_dconf
import core_scripts.data_io.seq_info as nii_seqinfo
import core_scripts.other_tools.list_tools as nii_list_tk
import core_scripts.other_tools.display as nii_display
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.op_process_monitor as nii_monitor
import core_scripts.op_manager.op_display_tools as nii_op_display_tk
import core_scripts.nn_manager.nn_manager_tools as nii_nn_tools
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#############################################################
def f_run_one_epoch(args,
pt_model, loss_wrapper, \
device, monitor, \
data_loader, epoch_idx, optimizer = None, \
target_norm_method = None,
data_set_wrapper = None):
"""
f_run_one_epoch:
run one epoch over the dataset (for the training or validation set)
Args:
args: from argpase
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
monitor: defined in op_process_monitor.py
data_loader: pytorch DataLoader.
epoch_idx: int, index of the current epoch
optimizer: torch optimizer or None
if None, back-propagation will be skipped
(for the development set)
target_norm_method: method to normalize target data
(by default, use pt_model.normalize_target)
data_set_wrapper: pytorch Dataset. It is only used to update
information
"""
# timer
start_time = time.time()
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
#############
# prepare
#############
# idx_orig is the original idx in the dataset
# which can be different from data_idx when shuffle = True
#idx_orig = idx_orig.numpy()[0]
#data_seq_info = data_info[0]
# send data to device
if optimizer is not None:
if args.size_accumulate_grad > 1:
# if accumulate-gradient is on, only zero out grad here
if data_idx % args.size_accumulate_grad == 0:
optimizer.zero_grad()
else:
# zero grad if gradient accumulation is not used
optimizer.zero_grad()
############
# compute output
############
data_in = nii_nn_tools.data2device(data_in, device, nii_dconf.d_dtype)
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
# if the data_tar is a list of tensors
data_tar_tm = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded, or a tensor")
if args.model_forward_with_file_name:
data_gen = pt_model(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model(data_in, data_tar_tm)
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model(data_in)
#####################
# compute loss and do back propagate
#####################
# Two cases
# 1. if loss is defined as pt_model.loss, then let the users do
# normalization inside pt_model.loss
# 2. if loss_wrapper is defined as a class independent from model
# there is no way to normalize the data inside the loss_wrapper
# because the normalization weight is saved in pt_model
if hasattr(pt_model, 'loss'):
# case 1, pt_model.loss is available
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
data_tar = []
loss_computed = pt_model.loss(data_gen, data_tar)
else:
# case 2, loss is defined independent of pt_model
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
# there is no way to normalize the data inside loss
# thus, do normalization here
if target_norm_method is None:
normed_target = pt_model.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
if target_norm_method is None:
normed_target = pt_model.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
else:
normed_target = []
# return the loss from loss_wrapper
# loss_computed may be [[loss_1, loss_2, ...],[flag_1, flag_2,.]]
# which contain multiple loss and flags indicating whether
# the corresponding loss should be taken into consideration
# for early stopping
# or
# loss_computed may be simply a tensor loss
loss_computed = loss_wrapper.compute(data_gen, normed_target)
loss_values = [0]
# To handle cases where there are multiple loss functions
# when loss_comptued is [[loss_1, loss_2, ...],[flag_1, flag_2,.]]
# loss: sum of [loss_1, loss_2, ...], for backward()
# loss_values: [loss_1.item(), loss_2.item() ..], for logging
# loss_flags: [True/False, ...], for logging,
# whether loss_n is used for early stopping
# when loss_computed is loss
# loss: loss
# loss_values: [loss.item()]
# loss_flags: [True]
loss, loss_values, loss_flags = nii_nn_tools.f_process_loss(
loss_computed)
# Back-propagation using the summed loss
if optimizer is not None and loss.requires_grad:
if args.size_accumulate_grad > 1:
# if gradient accumulation is on
# adjust loss based on the number of batches accumulated
loss = loss / args.size_accumulate_grad
loss.backward()
# do updating only after specified number of mini-batches
if (data_idx + 1) % args.size_accumulate_grad == 0:
# apply gradient clip
if args.grad_clip_norm > 0:
grad_norm = torch.nn.utils.clip_grad_norm_(
pt_model.parameters(), args.grad_clip_norm)
# update parameters
optimizer.step()
else:
# gradient accumulation is not used
# backward propagation
loss.backward()
# apply gradient clip
if args.grad_clip_norm > 0:
grad_norm = torch.nn.utils.clip_grad_norm_(
pt_model.parameters(), args.grad_clip_norm)
# update parameters
optimizer.step()
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch; thus, use loss_value
# rather than loss_value / batchsize
monitor.log_loss(loss_values, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print infor for one sentence
if args.verbose == 1:
# here we use args.batch_size because len(data_info)
# may be < args.batch_size.
monitor.print_error_for_batch(
data_idx * args.batch_size + idx,\
idx_orig.numpy()[idx], \
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
# Save intermediate model for every n mini-batches (optional).
# Note that if we re-start training with this intermediate model,
# the data will start from the 1st sample, not the one where we stopped
if args.save_model_every_n_minibatches > 0 \
and (data_idx+1) % args.save_model_every_n_minibatches == 0 \
and optimizer is not None and data_idx > 0:
cp_names = nii_nn_manage_conf.CheckPointKey()
tmp_model_name = nii_nn_tools.f_save_epoch_name(
args, epoch_idx, '_{:05d}'.format(data_idx+1))
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.optimizer : optimizer.state_dict()
}
torch.save(tmp_dic, tmp_model_name)
# If debug mode is used, only run a specified number of mini-batches
if args.debug_batch_num > 0 and data_idx >= (args.debug_batch_num - 1):
nii_display.f_print("Debug mode is on. This epoch is finished")
break
# other procedures
if data_set_wrapper and args.force_update_seq_length:
# update data length for sampler
# when using multi-thread (workers > 0), the information updated
# in each subthread will not be reflected here.
# we need to do this update manually
data_set_wrapper.update_seq_len_in_sampler_sub(data_info)
# loop done
return
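# A minimal sketch of the model interface touched by the two cases above
# (illustrative only; the layer size and the MSE criterion are hypothetical).
# Case 1 is triggered when the model defines .loss(); otherwise case 2 uses
# the external loss_wrapper together with model.normalize_target().
class _ExampleModel(nn.Module):
    def __init__(self, in_dim=10, out_dim=1):
        super(_ExampleModel, self).__init__()
        self.m_net = nn.Linear(in_dim, out_dim)
    def forward(self, data_in):
        return self.m_net(data_in)
    def loss(self, data_gen, data_tar):
        # may also return [[loss_1, loss_2, ...], [flag_1, flag_2, ...]]
        return F.mse_loss(data_gen, data_tar)
    def normalize_target(self, data_tar):
        # identity here; a real model would apply its target normalization
        return data_tar
    def denormalize_output(self, data_gen):
        # used by f_inference_wrapper before saving generated data
        return data_gen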
def f_train_wrapper(args, pt_model, loss_wrapper, device, \
optimizer_wrapper, \
train_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint = None):
"""
f_train_wrapper(args, pt_model, loss_wrapper, device,
optimizer_wrapper
train_dataset_wrapper, val_dataset_wrapper = None,
checkpoint = None):
A wrapper to run the training process
Args:
args: argument information given by argpase
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_wrapper:
a wrapper over optimizer (defined in op_manager.py)
optimizer_wrapper.optimizer is torch.optimizer
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can None.
checkpoint:
a checkpoint that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
# get the optimizer
optimizer_wrapper.print_info()
optimizer = optimizer_wrapper.optimizer
lr_scheduler = optimizer_wrapper.lr_scheduler
epoch_num = optimizer_wrapper.get_epoch_num()
no_best_epoch_num = optimizer_wrapper.get_no_best_epoch_num()
# get data loader for training set
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(epoch_num, val_seq_num)
else:
monitor_val = None
# training log information
train_log = ''
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
flag_multi_device = True
nii_display.f_print("\nUse %d GPUs\n" % (torch.cuda.device_count()))
# no way to call normtarget_f after pt_model is in DataParallel
normtarget_f = pt_model.normalize_target
pt_model = nn.DataParallel(pt_model)
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
pt_model.to(device, dtype=nii_dconf.d_dtype)
# print the network
nii_nn_tools.f_model_show(pt_model)
nii_nn_tools.f_loss_show(loss_wrapper)
cp_names = nii_nn_manage_conf.CheckPointKey()
###############################
## Resume training if necessary
###############################
# resume training or initialize the model if necessary
train_log = nii_nn_tools.f_load_checkpoint(
checkpoint, args, flag_multi_device, pt_model,
optimizer, monitor_trn, monitor_val, lr_scheduler)
######################
### User defined setup
######################
if hasattr(pt_model, "other_setups"):
nii_display.f_print("Conduct User-defined setup")
pt_model.other_setups()
# This should be merged with other_setups
if hasattr(pt_model, "g_pretrained_model_path") and \
hasattr(pt_model, "g_pretrained_model_prefix"):
nii_display.f_print("Load pret-rained models as part of this mode")
nii_nn_tools.f_load_pretrained_model_partially(
pt_model, pt_model.g_pretrained_model_path,
pt_model.g_pretrained_model_prefix)
######################
### Start training
######################
# other variables
flag_early_stopped = False
start_epoch = monitor_trn.get_epoch()
epoch_num = monitor_trn.get_max_epoch()
# print
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# loop over multiple epochs
for epoch_idx in range(start_epoch, epoch_num):
# training one epoch
pt_model.train()
# set validation and other flags if necessary
# this tells the model whether the current epoch is for validation
if hasattr(pt_model, 'validation'):
pt_model.validation = False
mes = "Warning: model.validation is deprecated, "
mes += "please use model.g_flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = False
if hasattr(pt_model, 'g_flag_validation'):
pt_model.g_flag_validation = False
# set epoch number
if hasattr(pt_model, 'g_epoch_idx'):
pt_model.g_epoch_idx = epoch_idx
# run one epoch
f_run_one_epoch(args, pt_model, loss_wrapper, device, \
monitor_trn, train_data_loader, \
epoch_idx, optimizer, normtarget_f, \
train_dataset_wrapper)
time_trn = monitor_trn.get_time(epoch_idx)
loss_trn = monitor_trn.get_loss(epoch_idx)
# if necessary, do validataion
if val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model.eval()
# set validation flag if necessary
if hasattr(pt_model, 'validation'):
pt_model.validation = True
mes = "Warning: model.validation is deprecated, "
mes += "please use model.flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = True
with torch.no_grad():
f_run_one_epoch(args, pt_model, loss_wrapper, \
device, \
monitor_val, val_data_loader, \
epoch_idx, None, normtarget_f, \
val_dataset_wrapper)
time_val = monitor_val.get_time(epoch_idx)
loss_val = monitor_val.get_loss(epoch_idx)
# update lr rate scheduler if necessary
if lr_scheduler.f_valid():
lr_scheduler.f_step(
monitor_val.get_loss_for_learning_stopping(epoch_idx))
else:
#time_val = monitor_val.get_time(epoch_idx)
#loss_val = monitor_val.get_loss(epoch_idx)
time_val, loss_val = 0, np.zeros_like(loss_trn)
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
train_log += nii_op_display_tk.print_train_info(
epoch_idx, time_trn, loss_trn, time_val, loss_val,
flag_new_best, optimizer_wrapper.get_lr_info())
# save the best model
if flag_new_best or args.force_save_lite_trained_network_per_epoch:
tmp_best_name = nii_nn_tools.f_save_trained_name(args)
torch.save(pt_model.state_dict(), tmp_best_name)
# save intermediate model if necessary
if not args.not_save_each_epoch:
tmp_model_name = nii_nn_tools.f_save_epoch_name(args, epoch_idx)
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
if lr_scheduler.f_valid():
lr_scheduler_state = lr_scheduler.f_state_dict()
else:
lr_scheduler_state = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log,
cp_names.lr_scheduler : lr_scheduler_state
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
# Early stopping
# note: if LR scheduler is used, early stopping will be
# disabled
if lr_scheduler.f_allow_early_stopping() and \
monitor_val is not None and \
monitor_val.should_early_stop(no_best_epoch_num):
flag_early_stopped = True
break
# Update dataset information if necessary
if args.force_update_seq_length:
# update the sequence length logged in sampler if sequence
# length are to be changed during data loading process
train_dataset_wrapper.update_seq_len_in_sampler()
val_dataset_wrapper.update_seq_len_in_sampler()
# loop done
nii_op_display_tk.print_log_tail()
if flag_early_stopped:
nii_display.f_print("Training finished by early stopping")
else:
nii_display.f_print("Training finished")
nii_display.f_print("Model is saved to", end = '')
nii_display.f_print("{}".format(nii_nn_tools.f_save_trained_name(args)))
return
def f_inference_wrapper(args, pt_model, device, \
test_dataset_wrapper, checkpoint):
""" Wrapper for inference
"""
# prepare dataloader
test_data_loader = test_dataset_wrapper.get_loader()
test_seq_num = test_dataset_wrapper.get_seq_num()
test_dataset_wrapper.print_info()
# cuda device
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_print(
"DataParallel for inference is not implemented", 'warning')
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
# print the network
pt_model.to(device, dtype=nii_dconf.d_dtype)
nii_nn_tools.f_model_show(pt_model)
# load trained model parameters from checkpoint
nii_nn_tools.f_load_checkpoint_for_inference(checkpoint, pt_model)
# decide the range of the data index to generate
range_genidx_start = args.inference_sample_start_index
if args.inference_sample_end_index < 0:
range_genidx_end = len(test_data_loader)
else:
range_genidx_end = args.inference_sample_end_index
if range_genidx_start >= range_genidx_end:
mes = "--inference-sample-start-index should be smaller than"
mes += " --inference-sample-end-index"
nii_display.f_die(mes)
# print information
nii_display.f_print("Start inference (generation):", 'highlight')
nii_display.f_print("Generate minibatch indexed within [{:d},{:d})".format(
range_genidx_start, range_genidx_end))
# if a list of file to be processed is provided
inf_datalist_path = args.inference_data_list
if len(inf_datalist_path):
inf_datalist = nii_list_tk.read_list_from_text(inf_datalist_path)
mes = "And only data in {:s} is processed".format(inf_datalist_path)
nii_display.f_print(mes)
else:
inf_datalist = None
# other information
if hasattr(args, 'trunc_input_length_for_inference') and \
args.trunc_input_length_for_inference > 0:
mes = "Generation in segment-by-segment mode (truncation length "
mes += "{:d})".format(args.trunc_input_length_for_inference)
nii_display.f_print(mes)
# output buffer, filename buffer
#output_buf = []
#filename_buf = []
# start generation
pt_model.eval()
total_start_time = time.time()
total_accumulate = 0
sample_account = 0
with torch.no_grad():
start_time_load = time.time()
# run generation
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(test_data_loader):
# decide whether to process this data sample or not
if data_idx < range_genidx_start:
# not in range
nii_display.f_print("skip {:s}".format(str(data_info)))
continue
elif data_idx >= range_genidx_end:
# not in range
nii_display.f_print("stopped by --inference-sample-end-index")
break
else:
# and if the data is in the list
if inf_datalist is not None:
# to be completed. this only works for batchsize=1
seqname = nii_seqinfo.SeqInfo()
seqname.parse_from_str(data_info[0])
if seqname.seq_tag() in inf_datalist:
pass
else:
nii_display.f_print("skip {:s}".format(str(data_info)))
continue
else:
pass
# send data to device and convert data type
if isinstance(data_in, torch.Tensor):
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_in, list) and data_in:
data_in = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_in]
else:
nii_display.f_die("data_in is not a tensor or list of tensors")
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
pass
# log the data-loading time for debugging
start_time_inf = time.time()
time_cost_load = (start_time_inf - start_time_load)/len(data_info)
# in case the model defines inference function explicitly
if hasattr(pt_model, "inference"):
infer_func = pt_model.inference
else:
infer_func = pt_model.forward
if hasattr(args, 'trunc_input_length_for_inference') and \
args.trunc_input_length_for_inference > 0:
# generate the data segment by segment, then do overlap and add
in_list, tar_list, overlap = nii_nn_tools.f_split_data(
data_in, data_tar, args.trunc_input_length_for_inference,
args.trunc_input_overlap)
gen_list = []
for in_tmp, tar_tmp in zip(in_list, tar_list):
# compute output
if args.model_forward_with_target:
if args.model_forward_with_file_name:
data_gen = infer_func(in_tmp, tar_tmp, data_info)
else:
data_gen = infer_func(in_tmp, tar_tmp)
else:
if args.model_forward_with_file_name:
data_gen = infer_func(in_tmp, data_info)
else:
data_gen = infer_func(in_tmp)
gen_list.append(data_gen)
# generation model may "up-sample" the input, we need to know
# output_segment_length // input_segment_length
if len(gen_list) > 0 and len(in_list) > 0:
upsamp_fac = gen_list[0].shape[1] // in_list[0].shape[1]
data_gen = nii_nn_tools.f_overlap_data(
gen_list, upsamp_fac * overlap)
else:
print("Gneration failed on {:s}".format(data_info))
sys.exit(1)
else:
# normal case: generate the output sequence as a whole
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder
if args.model_forward_with_file_name:
data_gen = infer_func(data_in, data_tar, data_info)
else:
data_gen = infer_func(data_in, data_tar)
else:
if args.model_forward_with_file_name:
data_gen = infer_func(data_in, data_info)
else:
data_gen = infer_func(data_in)
# log time for debugging
start_time_save = time.time()
time_cost_inf = start_time_save - start_time_inf
# average time for each sequence when batchsize > 1
time_cost_inf = time_cost_inf / len(data_info)
# write the generated data to file
if data_gen is None:
nii_display.f_print("No output saved: %s" % (str(data_info)),\
'warning')
else:
#output_buf.append(data_gen)
#filename_buf.append(data_info)
try:
data_gen = pt_model.denormalize_output(data_gen)
data_gen_np = data_gen.to("cpu").numpy()
except AttributeError:
mes = "Output data is not torch.tensor. Please check "
mes += "model.forward or model.inference"
nii_display.f_die(mes)
# save output (loop over sequences in case batchsize > 1)
for idx, seq_info in enumerate(data_info):
#nii_display.f_print(seq_info)
test_dataset_wrapper.putitem(data_gen_np[idx:idx+1],\
args.output_dir, \
args.output_filename_prefix, \
seq_info)
# time cost for saving the generated data
start_time_load = time.time()
time_cost_save = (start_time_load - start_time_save)/len(data_info)
# print information
time_cost = time_cost_load + time_cost_inf + time_cost_save
for idx, seq_info in enumerate(data_info):
if args.verbose == 2:
print("{:s} {:f} {:f} {:f}".format(
seq_info, time_cost_load, time_cost_inf, time_cost_save))
sample_account += 1
_ = nii_op_display_tk.print_gen_info(
seq_info, time_cost, sample_account)
# log time for debugging
total_accumulate += time_cost * len(data_info)
#
total_time = time.time() - total_start_time
nii_display.f_print("Inference time cost: {:f}s".format(total_time))
#nii_display.f_print("{:f}".format(total_accumulate))
# done for
# done with
nii_display.f_print("Output data has been saved to %s" % (args.output_dir))
# finish up if necessary
if hasattr(pt_model, "finish_up_inference"):
pt_model.finish_up_inference()
# done
return
def f_convert_epoch_to_trained(args, pt_model, device, checkpoint):
""" Convert a checkpoint to trained_network.pt
(remove gradients and other statistics for training)
"""
# cuda device
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_print(
"DataParallel is not implemented here", 'warning')
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
# print the network
pt_model.to(device, dtype=nii_dconf.d_dtype)
nii_nn_tools.f_model_show(pt_model)
# load trained model parameters from checkpoint
nii_nn_tools.f_load_checkpoint_for_inference(checkpoint, pt_model)
# start generation
nii_display.f_print("Start conversion:", 'highlight')
tmp_best_name = nii_nn_tools.f_save_trained_name(args)
torch.save(pt_model.state_dict(), tmp_best_name)
nii_display.f_print("Model is saved to", end = '')
nii_display.f_print("{}".format(tmp_best_name))
# done
return
if __name__ == "__main__":
print("nn_manager")
| 32,606 | 39.606476 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager_profile.py | #!/usr/bin/env python
"""
A trimmed version of nn_manager.py for profiling
This requires Pytorch-1.8
https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html
It requires a specific ../../sandbox/different_main/main_profile.py to run.
"""
from __future__ import print_function
import time
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.profiler
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.display as nii_display
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.op_process_monitor as nii_monitor
import core_scripts.op_manager.op_display_tools as nii_op_display_tk
import core_scripts.nn_manager.nn_manager_tools as nii_nn_tools
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#############################################################
def f_run_one_epoch_profile(args,
pt_model, loss_wrapper, \
device, monitor, \
data_loader, epoch_idx, optimizer = None, \
target_norm_method = None):
"""
f_run_one_epoch:
run one epoch over the dataset (for training or validation sets)
Args:
args: from argparse
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
monitor: defined in op_procfess_monitor.py
data_loader: pytorch DataLoader.
epoch_idx: int, index of the current epoch
optimizer: torch optimizer or None
if None, the back propagation will be skipped
(for development set)
target_norm_method: method to normalize target data
(by default, use pt_model.normalize_target)
"""
# timer
start_time = time.time()
#######################
# options for profile
#######################
try:
prof_opt = [int(x) for x in args.wait_warmup_active_repeat.split('-')]
except ValueError:
nii_display.f_die("Fail to parse --wait-warmup-active-repeat")
if len(prof_opt) != 4:
nii_display.f_die("Fail to parse --wait-warmup-active-repeat")
# number of steps for profiling
num_steps = (prof_opt[0] + prof_opt[1] + prof_opt[2]) * prof_opt[3]
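# illustrative example (values are hypothetical): --wait-warmup-active-repeat
# 2-2-6-2 gives wait=2, warmup=2, active=6, repeat=2, so the profiler covers
# num_steps = (2 + 2 + 6) * 2 = 20 mini-batches before the epoch is stopped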
# output dir
prof_outdir = args.profile_output_dir
with torch.profiler.profile(
schedule=torch.profiler.schedule(wait=prof_opt[0],
warmup=prof_opt[1],
active=prof_opt[2],
repeat=prof_opt[3]),
on_trace_ready=torch.profiler.tensorboard_trace_handler(prof_outdir),
record_shapes=True,
profile_memory=False,
with_stack=True
) as prof:
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
# In profiling mode, only run the specified number of mini-batches
if data_idx >= num_steps:
nii_display.f_print("Profiling mode is on. Epoch is finished")
break
#############
# prepare
#############
# idx_orig is the original idx in the dataset
# which can be different from data_idx when shuffle = True
#idx_orig = idx_orig.numpy()[0]
#data_seq_info = data_info[0]
# send data to device
if optimizer is not None:
optimizer.zero_grad()
############
# compute output
############
if isinstance(data_in, torch.Tensor):
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_in, list) and data_in:
data_in=[x.to(device, dtype=nii_dconf.d_dtype) for x in data_in]
else:
nii_display.f_die("data_in is not a tensor or list of tensors")
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
# if the data_tar is a list of tensors
data_tar_tm = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded, or a tensor")
if args.model_forward_with_file_name:
data_gen = pt_model(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model(data_in, data_tar_tm)
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model(data_in)
#####################
# compute loss and do back propagate
#####################
# Two cases
# 1. if loss is defined as pt_model.loss, then let the users do
# normalization inside the pt_mode.loss
# 2. if loss_wrapper is defined as a class independent from model
# there is no way to normalize the data inside the loss_wrapper
# because the normalization weight is saved in pt_model
if hasattr(pt_model, 'loss'):
# case 1, pt_model.loss is available
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
data_tar = []
loss_computed = pt_model.loss(data_gen, data_tar)
else:
# case 2, loss is defined independent of pt_model
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
# there is no way to normalize the data inside loss
# thus, do normalization here
if target_norm_method is None:
normed_target = pt_model.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
if target_norm_method is None:
normed_target = pt_model.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
else:
normed_target = []
# return the loss from loss_wrapper
# loss_computed may be [[loss_1, loss_2, ...],[flag_1,flag_2,.]]
# which contain multiple loss and flags indicating whether
# the corresponding loss should be taken into consideration
# for early stopping
# or
# loss_computed may be simply a tensor loss
loss_computed = loss_wrapper.compute(data_gen, normed_target)
loss_values = [0]
# To handle cases where there are multiple loss functions
# when loss_comptued is [[loss_1, loss_2, ...],[flag_1, flag_2,.]]
# loss: sum of [loss_1, loss_2, ...], for backward()
# loss_values: [loss_1.item(), loss_2.item() ..], for logging
# loss_flags: [True/False, ...], for logging,
# whether loss_n is used for early stopping
# when loss_computed is loss
# loss: loss
# los_vals: [loss.item()]
# loss_flags: [True]
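# illustrative example (hypothetical losses): if loss_computed is
# [[mse_loss, gan_loss], [True, False]], then loss = mse_loss + gan_loss
# is used for backward(), loss_values = [mse_loss.item(), gan_loss.item()]
# is logged, and only mse_loss (flag True) counts for early stopping;
# a plain tensor is treated as [[loss], [True]]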
loss, loss_values, loss_flags = nii_nn_tools.f_process_loss(
loss_computed)
# Back-propagation using the summed loss
if optimizer is not None and loss.requires_grad:
# backward propagation
loss.backward()
# apply gradient clip
if args.grad_clip_norm > 0:
grad_norm = torch.nn.utils.clip_grad_norm_(
pt_model.parameters(), args.grad_clip_norm)
# update parameters
optimizer.step()
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch, thus, just loss_value
# rather loss_value / batchsize
monitor.log_loss(loss_values, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print infor for one sentence
if args.verbose == 1:
# here we use args.batch_size because len(data_info)
# may be < args.batch_size.
monitor.print_error_for_batch(
data_idx * args.batch_size + idx,\
idx_orig.numpy()[idx], \
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
#
prof.step()
# loop done
# done with
return
def f_train_wrapper(args, pt_model, loss_wrapper, device, \
optimizer_wrapper, \
train_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint = None):
"""
f_train_wrapper(args, pt_model, loss_wrapper, device,
optimizer_wrapper
train_dataset_wrapper, val_dataset_wrapper = None,
check_point = None):
A wrapper to run the training process
Args:
args: argument information given by argparse
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_wrapper:
a wrapper over optimizer (defined in op_manager.py)
optimizer_wrapper.optimizer is torch.optimizer
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
check_point:
a check_point that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
# get the optimizer
optimizer_wrapper.print_info()
optimizer = optimizer_wrapper.optimizer
lr_scheduler = optimizer_wrapper.lr_scheduler
epoch_num = optimizer_wrapper.get_epoch_num()
no_best_epoch_num = optimizer_wrapper.get_no_best_epoch_num()
# get data loader for training set
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(epoch_num, val_seq_num)
else:
monitor_val = None
# training log information
train_log = ''
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
flag_multi_device = True
nii_display.f_print("\nUse %d GPUs\n" % (torch.cuda.device_count()))
# no way to call normtarget_f after pt_model is in DataParallel
normtarget_f = pt_model.normalize_target
pt_model = nn.DataParallel(pt_model)
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
pt_model.to(device, dtype=nii_dconf.d_dtype)
# print the network
nii_nn_tools.f_model_show(pt_model)
nii_nn_tools.f_loss_show(loss_wrapper)
cp_names = nii_nn_manage_conf.CheckPointKey()
###############################
## Resume training if necessary
###############################
# resume training or initialize the model if necessary
train_log = nii_nn_tools.f_load_checkpoint(
checkpoint, args, flag_multi_device, pt_model,
optimizer, monitor_trn, monitor_val, lr_scheduler)
######################
### User defined setup
######################
if hasattr(pt_model, "other_setups"):
nii_display.f_print("Conduct User-defined setup")
pt_model.other_setups()
# This should be merged with other_setups
if hasattr(pt_model, "g_pretrained_model_path") and \
hasattr(pt_model, "g_pretrained_model_prefix"):
nii_display.f_print("Load pret-rained models as part of this mode")
nii_nn_tools.f_load_pretrained_model_partially(
pt_model, pt_model.g_pretrained_model_path,
pt_model.g_pretrained_model_prefix)
######################
### Start training
######################
# other variables
flag_early_stopped = False
start_epoch = monitor_trn.get_epoch()
#epoch_num = monitor_trn.get_max_epoch()
epoch_num = 1
# print
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# loop over multiple epochs
for epoch_idx in range(start_epoch, epoch_num):
# training one epoch
pt_model.train()
# set validation flag if necessary
if hasattr(pt_model, 'validation'):
pt_model.validation = False
mes = "Warning: model.validation is deprecated, "
mes += "please use model.flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = False
f_run_one_epoch_profile(
args, pt_model, loss_wrapper, device, \
monitor_trn, train_data_loader, \
epoch_idx, optimizer, normtarget_f)
time_trn = monitor_trn.get_time(epoch_idx)
loss_trn = monitor_trn.get_loss(epoch_idx)
# No validation for profiling
if False and val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model.eval()
# set validation flag if necessary
if hasattr(pt_model, 'validation'):
pt_model.validation = True
mes = "Warning: model.validation is deprecated, "
mes += "please use model.flag_validation"
nii_display.f_print(mes, 'warning')
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = True
with torch.no_grad():
f_run_one_epoch(args, pt_model, loss_wrapper, \
device, \
monitor_val, val_data_loader, \
epoch_idx, None, normtarget_f)
time_val = monitor_val.get_time(epoch_idx)
loss_val = monitor_val.get_loss(epoch_idx)
# update lr rate scheduler if necessary
if lr_scheduler.f_valid():
lr_scheduler.f_step(loss_val)
else:
time_val, loss_val = 0, 0
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
train_log += nii_op_display_tk.print_train_info(
epoch_idx, time_trn, loss_trn, time_val, loss_val,
flag_new_best, optimizer_wrapper.get_lr_info())
# do not save the best model when profiling
if False and flag_new_best:
tmp_best_name = nii_nn_tools.f_save_trained_name(args)
torch.save(pt_model.state_dict(), tmp_best_name)
# do not save intermediate models when profiling
if False and not args.not_save_each_epoch:
tmp_model_name = nii_nn_tools.f_save_epoch_name(args, epoch_idx)
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
if lr_scheduler.f_valid():
lr_scheduler_state = lr_scheduler.f_state_dict()
else:
lr_scheduler_state = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log,
cp_names.lr_scheduler : lr_scheduler_state
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
# Early stopping
# note: if LR scheduler is used, early stopping will be
# disabled
if lr_scheduler.f_allow_early_stopping() and \
monitor_val is not None and \
monitor_val.should_early_stop(no_best_epoch_num):
flag_early_stopped = True
break
# loop done
nii_op_display_tk.print_log_tail()
if flag_early_stopped:
nii_display.f_print("Profiling finished")
else:
nii_display.f_print("Profiling finished")
#nii_display.f_print("Model is saved to", end = '')
#nii_display.f_print("{}".format(nii_nn_tools.f_save_trained_name(args)))
nii_display.f_print("Profiling log is saved to {:s}".format(
args.profile_output_dir))
return
def f_inference_wrapper(args, pt_model, device, \
test_dataset_wrapper, checkpoint):
""" Wrapper for inference
"""
# prepare dataloader
test_data_loader = test_dataset_wrapper.get_loader()
test_seq_num = test_dataset_wrapper.get_seq_num()
test_dataset_wrapper.print_info()
# cuda device
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_print(
"DataParallel for inference is not implemented", 'warning')
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
# print the network
pt_model.to(device, dtype=nii_dconf.d_dtype)
nii_nn_tools.f_model_show(pt_model)
# load trained model parameters from checkpoint
nii_nn_tools.f_load_checkpoint_for_inference(checkpoint, pt_model)
# start generation
nii_display.f_print("Start inference (generation):", 'highlight')
# output buffer, filename buffer
output_buf = []
filename_buf = []
pt_model.eval()
with torch.no_grad():
# run generation
for _, (data_in, data_tar, data_info, idx_orig) in \
enumerate(test_data_loader):
# send data to device and convert data type
if isinstance(data_in, torch.Tensor):
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_in, list) and data_in:
data_in = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_in]
else:
nii_display.f_die("data_in is not a tensor or list of tensors")
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
elif isinstance(data_tar, list) and data_tar:
data_tar = [x.to(device, dtype=nii_dconf.d_dtype) \
for x in data_tar]
else:
pass
start_time = time.time()
# in case the model defines inference function explicitly
if hasattr(pt_model, "inference"):
infer_func = pt_model.inference
else:
infer_func = pt_model.forward
# compute output
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder
if args.model_forward_with_file_name:
data_gen = infer_func(data_in, data_tar, data_info)
else:
data_gen = infer_func(data_in, data_tar)
else:
if args.model_forward_with_file_name:
data_gen = infer_func(data_in, data_info)
else:
data_gen = infer_func(data_in)
time_cost = time.time() - start_time
# average time for each sequence when batchsize > 1
time_cost = time_cost / len(data_info)
if data_gen is None:
nii_display.f_print("No output saved: %s" % (str(data_info)),\
'warning')
else:
output_buf.append(data_gen)
filename_buf.append(data_info)
# print information
for idx, seq_info in enumerate(data_info):
_ = nii_op_display_tk.print_gen_info(seq_info, time_cost)
# Writing generatd data to disk
nii_display.f_print("Writing output to %s" % (args.output_dir))
for data_gen, data_info in zip(output_buf, filename_buf):
if data_gen is not None:
try:
data_gen = pt_model.denormalize_output(data_gen)
data_gen_np = data_gen.to("cpu").numpy()
except AttributeError:
mes = "Output data is not torch.tensor. Please check "
mes += "model.forward or model.inference"
nii_display.f_die(mes)
# save output (loop over sequences in case batchsize > 1)
for idx, seq_info in enumerate(data_info):
nii_display.f_print(seq_info)
test_dataset_wrapper.putitem(data_gen_np[idx:idx+1],\
args.output_dir, \
args.output_filename_prefix, \
seq_info)
# done for
# done with
nii_display.f_print("Output data has been saved to %s" % (args.output_dir))
# finish up if necessary
if hasattr(pt_model, "finish_up_inference"):
pt_model.finish_up_inference()
# done
return
if __name__ == "__main__":
print("nn_manager used for profiling")
| 25,076 | 38.931529 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager_GAN_ob.py | #!/usr/bin/env python
"""
nn_manager_gan
A simple wrapper to run the training / testing process for GAN
"""
from __future__ import print_function
import time
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.display as nii_display
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.op_process_monitor as nii_monitor
import core_scripts.op_manager.op_display_tools as nii_op_display_tk
import core_scripts.nn_manager.nn_manager_tools as nii_nn_tools
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#############################################################
def f_run_one_epoch_GAN(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, monitor, \
data_loader, epoch_idx,
optimizer_G = None, optimizer_D = None, \
target_norm_method = None):
"""
f_run_one_epoch_GAN:
run one epoch over the dataset (for training or validation sets)
Args:
args: from argparse
pt_model_G: pytorch model (torch.nn.Module) generator
pt_model_D: pytorch model (torch.nn.Module) discriminator
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
monitor: defined in op_procfess_monitor.py
data_loader: pytorch DataLoader.
epoch_idx: int, index of the current epoch
optimizer_G: torch optimizer or None, for generator
optimizer_D: torch optimizer or None, for discriminator
if None, the back propagation will be skipped
(for development set)
target_norm_method: method to normalize target data
(by default, use pt_model.normalize_target)
"""
# timer
start_time = time.time()
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
#############
# prepare
#############
# send data to device
if optimizer_G is not None:
optimizer_G.zero_grad()
if optimizer_D is not None:
optimizer_D.zero_grad()
# normalize the target data (for input for discriminator)
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
# there is no way to normalize the data inside loss
# thus, do normalization here
if target_norm_method is None:
normed_target = pt_model_G.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
else:
nii_display.f_die("target data is required")
# to device (we assume noise will be generated by the model itself)
# here we only provide external condition
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
############################
# Update Discriminator
############################
####
# train with real
####
pt_model_D.zero_grad()
d_out_real = pt_model_D(data_tar, data_in)
errD_real = loss_wrapper.compute_gan_D_real(d_out_real)
if optimizer_D is not None:
errD_real.backward()
# this should be given by pt_model_D or loss wrapper
#d_out_real_mean = d_out_real.mean()
###
# train with fake
###
# generate sample
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
if args.model_forward_with_file_name:
data_gen = pt_model_G(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model_G(data_in, data_tar_tm)
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded")
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model_G(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model_G(data_in)
# data_gen.detach() is required
# https://github.com/pytorch/examples/issues/116
# https://stackoverflow.com/questions/46774641/
d_out_fake = pt_model_D(data_gen.detach(), data_in)
errD_fake = loss_wrapper.compute_gan_D_fake(d_out_fake)
if optimizer_D is not None:
errD_fake.backward()
# get the summed error for the discriminator (only for displaying)
errD = errD_real + errD_fake
# update discriminator weight
if optimizer_D is not None:
optimizer_D.step()
############################
# Update Generator
############################
pt_model_G.zero_grad()
d_out_fake_for_G = pt_model_D(data_gen, data_in)
errG_gan = loss_wrapper.compute_gan_G(d_out_fake_for_G)
# if defined, calculate the auxiliary loss
if hasattr(loss_wrapper, "compute_aux"):
errG_aux = loss_wrapper.compute_aux(data_gen, data_tar)
else:
errG_aux = torch.zeros_like(errG_gan)
# if defined, calculate feat-matching loss
if hasattr(loss_wrapper, "compute_feat_match"):
errG_feat = loss_wrapper.compute_feat_match(
d_out_real, d_out_fake_for_G)
else:
errG_feat = torch.zeros_like(errG_gan)
# sum loss for generator
errG = errG_gan + errG_aux + errG_feat
if optimizer_G is not None:
errG.backward()
optimizer_G.step()
# construct the loss for logging and early stopping
# only use errG_aux for early-stopping
loss_computed = [
[errG_aux, errD_real, errD_fake, errG_gan, errG_feat],
[True, False, False, False, False]]
# to handle cases where there are multiple loss functions
_, loss_vals, loss_flags = nii_nn_tools.f_process_loss(loss_computed)
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch, thus, just loss_value
# rather loss_value / batchsize
monitor.log_loss(loss_vals, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print infor for one sentence
if args.verbose == 1:
monitor.print_error_for_batch(data_idx*batchsize + idx,\
idx_orig.numpy()[idx], \
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
# loop done
return
def f_run_one_epoch_WGAN(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, monitor, \
data_loader, epoch_idx,
optimizer_G = None, optimizer_D = None, \
target_norm_method = None):
"""
f_run_one_epoch_WGAN:
similar to f_run_one_epoch_GAN, but for WGAN
"""
# timer
start_time = time.time()
# This should be moved to model definition
# number of critic updates per generator update (default 5)
num_critic = 5
# clip value
wgan_clamp = 0.01
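# note: clamping critic weights to [-wgan_clamp, wgan_clamp] after each
# discriminator update follows the weight-clipping trick of the original
# WGAN (Arjovsky et al., 2017), where 0.01 is the suggested clip value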
# loop over samples
for data_idx, (data_in, data_tar, data_info, idx_orig) in \
enumerate(data_loader):
# send data to device
if optimizer_G is not None:
optimizer_G.zero_grad()
if optimizer_D is not None:
optimizer_D.zero_grad()
# prepare data
if isinstance(data_tar, torch.Tensor):
data_tar = data_tar.to(device, dtype=nii_dconf.d_dtype)
# there is no way to normalize the data inside loss
# thus, do normalization here
if target_norm_method is None:
normed_target = pt_model_G.normalize_target(data_tar)
else:
normed_target = target_norm_method(data_tar)
else:
nii_display.f_die("target data is required")
# to device (we assume noise will be generated by the model itself)
# here we only provide external condition
data_in = data_in.to(device, dtype=nii_dconf.d_dtype)
############################
# Update Discriminator
############################
# train with real
pt_model_D.zero_grad()
d_out_real = pt_model_D(data_tar)
errD_real = loss_wrapper.compute_gan_D_real(d_out_real)
if optimizer_D is not None:
errD_real.backward()
d_out_real_mean = d_out_real.mean()
# train with fake
# generate sample
if args.model_forward_with_target:
# if model.forward requires (input, target) as arguments
# for example, for auto-encoder & autoregressive model
if isinstance(data_tar, torch.Tensor):
data_tar_tm = data_tar.to(device, dtype=nii_dconf.d_dtype)
if args.model_forward_with_file_name:
data_gen = pt_model_G(data_in, data_tar_tm, data_info)
else:
data_gen = pt_model_G(data_in, data_tar_tm)
else:
nii_display.f_print("--model-forward-with-target is set")
nii_display.f_die("but data_tar is not loaded")
else:
if args.model_forward_with_file_name:
# special case when model.forward requires data_info
data_gen = pt_model_G(data_in, data_info)
else:
# normal case for model.forward(input)
data_gen = pt_model_G(data_in)
# data_gen.detach() is required
# https://github.com/pytorch/examples/issues/116
d_out_fake = pt_model_D(data_gen.detach())
errD_fake = loss_wrapper.compute_gan_D_fake(d_out_fake)
if optimizer_D is not None:
errD_fake.backward()
d_out_fake_mean = d_out_fake.mean()
errD = errD_real + errD_fake
if optimizer_D is not None:
optimizer_D.step()
# clip weights of discriminator
for p in pt_model_D.parameters():
p.data.clamp_(-wgan_clamp, wgan_clamp)
############################
# Update Generator
############################
pt_model_G.zero_grad()
d_out_fake_for_G = pt_model_D(data_gen)
errG_gan = loss_wrapper.compute_gan_G(d_out_fake_for_G)
errG_aux = loss_wrapper.compute_aux(data_gen, data_tar)
errG = errG_gan + errG_aux
# only update the generator after num_critic iterations on the discriminator
if data_idx % num_critic == 0 and optimizer_G is not None:
errG.backward()
optimizer_G.step()
d_out_fake_for_G_mean = d_out_fake_for_G.mean()
# construct the loss for logging and early stopping
# only use errG_aux for early-stopping
loss_computed = [[errG_aux, errG_gan, errD_real, errD_fake,
d_out_real_mean, d_out_fake_mean,
d_out_fake_for_G_mean],
[True, False, False, False, False, False, False]]
# to handle cases where there are multiple loss functions
loss, loss_vals, loss_flags = nii_nn_tools.f_process_loss(loss_computed)
# save the training process information to the monitor
end_time = time.time()
batchsize = len(data_info)
for idx, data_seq_info in enumerate(data_info):
# loss_value is supposed to be the average loss value
# over samples in the batch, thus, just loss_value
# rather loss_value / batchsize
monitor.log_loss(loss_vals, loss_flags, \
(end_time-start_time) / batchsize, \
data_seq_info, idx_orig.numpy()[idx], \
epoch_idx)
# print infor for one sentence
if args.verbose == 1:
monitor.print_error_for_batch(data_idx*batchsize + idx,\
idx_orig.numpy()[idx], \
epoch_idx)
#
# start the timer for a new batch
start_time = time.time()
# loop done
return
def f_train_wrapper_GAN(
args, pt_model_G, pt_model_D, loss_wrapper, device, \
optimizer_G_wrapper, optimizer_D_wrapper, \
train_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint_G = None, checkpoint_D = None):
"""
f_train_wrapper_GAN(
args, pt_model_G, pt_model_D, loss_wrapper, device,
optimizer_G_wrapper, optimizer_D_wrapper,
train_dataset_wrapper, val_dataset_wrapper = None,
check_point = None):
A wrapper to run the training process
Args:
args: argument information given by argparse
pt_model_G: generator, pytorch model (torch.nn.Module)
pt_model_D: discriminator, pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss functions
loss_wrapper.compute_gan_D_real(discriminator_output)
loss_wrapper.compute_gan_D_fake(discriminator_output)
loss_wrapper.compute_gan_G(discriminator_output)
loss_wrapper.compute_aux(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_G_wrapper:
an optimizer wrapper for the generator (defined in op_manager.py)
optimizer_D_wrapper:
an optimizer wrapper for the discriminator (defined in op_manager.py)
train_dataset_wrapper:
a wrapper over training data set (data_io/default_data_io.py)
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
checkpoint_G:
a check_point that stores everything needed to resume training
checkpoint_D:
a check_point that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
# get the optimizer
optimizer_G_wrapper.print_info()
optimizer_D_wrapper.print_info()
optimizer_G = optimizer_G_wrapper.optimizer
optimizer_D = optimizer_D_wrapper.optimizer
epoch_num = optimizer_G_wrapper.get_epoch_num()
no_best_epoch_num = optimizer_G_wrapper.get_no_best_epoch_num()
# get data loader for training set
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(epoch_num, val_seq_num)
else:
monitor_val = None
# training log information
train_log = ''
model_tags = ["_G", "_D"]
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_die("data_parallel not implemented for GAN")
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
pt_model_G.to(device, dtype=nii_dconf.d_dtype)
pt_model_D.to(device, dtype=nii_dconf.d_dtype)
# print the network
nii_display.f_print("Setup generator")
nii_nn_tools.f_model_show(pt_model_G, model_type='GAN')
nii_display.f_print("Setup discriminator")
nii_nn_tools.f_model_show(pt_model_D, do_model_def_check=False,
model_type='GAN')
nii_nn_tools.f_loss_show(loss_wrapper, model_type='GAN')
###############################
## Resume training if necessary
###############################
# resume training or initialize the model if necessary
cp_names = nii_nn_manage_conf.CheckPointKey()
if checkpoint_G is not None or checkpoint_D is not None:
for checkpoint, optimizer, pt_model, model_name in \
zip([checkpoint_G, checkpoint_D], [optimizer_G, optimizer_D],
[pt_model_G, pt_model_D], ["Generator", "Discriminator"]):
nii_display.f_print("For %s" % (model_name))
if type(checkpoint) is dict:
# checkpoint
# load model parameter and optimizer state
if cp_names.state_dict in checkpoint:
# wrap the state_dic in f_state_dict_wrapper
# in case the model is saved when DataParallel is on
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint[cp_names.state_dict],
flag_multi_device))
# load optimizer state
if cp_names.optimizer in checkpoint:
optimizer.load_state_dict(checkpoint[cp_names.optimizer])
# optionally, load training history
if not args.ignore_training_history_in_trained_model:
#nii_display.f_print("Load ")
if cp_names.trnlog in checkpoint:
monitor_trn.load_state_dic(
checkpoint[cp_names.trnlog])
if cp_names.vallog in checkpoint and monitor_val:
monitor_val.load_state_dic(
checkpoint[cp_names.vallog])
if cp_names.info in checkpoint:
train_log = checkpoint[cp_names.info]
nii_display.f_print("Load check point, resume training")
else:
nii_display.f_print("Load pretrained model and optimizer")
elif checkpoint is not None:
# only model status
#pt_model.load_state_dict(checkpoint)
pt_model.load_state_dict(
nii_nn_tools.f_state_dict_wrapper(
checkpoint, flag_multi_device))
nii_display.f_print("Load pretrained model")
else:
nii_display.f_print("No pretrained model")
# done for resume training
######################
### User defined setup
######################
# Not implemented yet
######################
### Start training
######################
# other variables
flag_early_stopped = False
start_epoch = monitor_trn.get_epoch()
epoch_num = monitor_trn.get_max_epoch()
# select one wrapper, based on the flag in loss definition
if hasattr(loss_wrapper, "flag_wgan") and loss_wrapper.flag_wgan:
f_wrapper_gan_one_epoch = f_run_one_epoch_WGAN
else:
f_wrapper_gan_one_epoch = f_run_one_epoch_GAN
# print
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# loop over multiple epochs
for epoch_idx in range(start_epoch, epoch_num):
# training one epoch
pt_model_D.train()
pt_model_G.train()
f_wrapper_gan_one_epoch(
args, pt_model_G, pt_model_D,
loss_wrapper, device, \
monitor_trn, train_data_loader, \
epoch_idx, optimizer_G, optimizer_D,
normtarget_f)
time_trn = monitor_trn.get_time(epoch_idx)
loss_trn = monitor_trn.get_loss(epoch_idx)
# if necessary, do validation
if val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model_G.eval()
pt_model_D.eval()
with torch.no_grad():
f_wrapper_gan_one_epoch(
args, pt_model_G, pt_model_D,
loss_wrapper, \
device, \
monitor_val, val_data_loader, \
epoch_idx, None, None, normtarget_f)
time_val = monitor_val.get_time(epoch_idx)
loss_val = monitor_val.get_loss(epoch_idx)
else:
time_val, loss_val = 0, 0
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
train_log += nii_op_display_tk.print_train_info(
epoch_idx, time_trn, loss_trn, time_val, loss_val,
flag_new_best, optimizer_G_wrapper.get_lr_info())
# save the best model
if flag_new_best:
for pt_model, tmp_tag in zip([pt_model_G, pt_model_D], model_tags):
tmp_best_name = nii_nn_tools.f_save_trained_name(args, tmp_tag)
torch.save(pt_model.state_dict(), tmp_best_name)
# save intermediate model if necessary
if not args.not_save_each_epoch:
# save discriminator and generator models
for pt_model, optimizer, model_tag in \
zip([pt_model_G, pt_model_D], [optimizer_G, optimizer_D],
model_tags):
tmp_model_name = nii_nn_tools.f_save_epoch_name(
args, epoch_idx, model_tag)
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
# early stopping
if monitor_val is not None and \
monitor_val.should_early_stop(no_best_epoch_num):
flag_early_stopped = True
break
# loop done
nii_op_display_tk.print_log_tail()
if flag_early_stopped:
nii_display.f_print("Training finished by early stopping")
else:
nii_display.f_print("Training finished")
nii_display.f_print("Model is saved to", end = '')
for model_tag in model_tags:
nii_display.f_print("{}".format(
nii_nn_tools.f_save_trained_name(args, model_tag)))
return
if __name__ == "__main__":
print("nn_manager for GAN")
| 24,536 | 38.009539 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/nn_manager/nn_manager_AL.py | #!/usr/bin/env python
"""
nn_manager_AL
A simple wrapper to run the training for active learning
Note:
1. Resuming training does not guarantee exactly the same result,
because data selection is based on random sampling and the random
seed used for selection differs across runs.
"""
from __future__ import print_function
import os
import time
import datetime
import numpy as np
import copy
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.display as nii_display
import core_scripts.other_tools.str_tools as nii_str_tk
import core_scripts.op_manager.op_process_monitor as nii_monitor
import core_scripts.op_manager.op_display_tools as nii_op_display_tk
import core_scripts.nn_manager.nn_manager_tools as nii_nn_tools
import core_scripts.nn_manager.nn_manager_conf as nii_nn_manage_conf
import core_scripts.nn_manager.nn_manager as nii_nn_manager_base
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#############################################################
__g_info_separator = ':'
__g_name_separator = ';'
__g_type_tags = ['add', 'remove']
def __print_new_sample_list(cycle_idx, dataset_wrapper, data_idx):
""" print information on the newly added data
"""
mes = 'Active learning cycle {:d}, add samples'.format(cycle_idx)
mes += __g_info_separator + ' '
mes += __g_name_separator.join(dataset_wrapper.get_seq_info())
mes += __g_info_separator + ' '
mes += __g_name_separator.join([str(x) for x in data_idx])
#mes += '\nNumber of samples: {:d}'.format(len(data_idx))
nii_display.f_eprint(mes)
return mes
def __print_excl_sample_list(cycle_idx, dataset_wrapper, data_idx):
""" print information on the newly removed data
"""
mes = 'Before learning cycle {:d}, remove'.format(cycle_idx)
mes += __g_info_separator + ' '
mes += __g_name_separator.join(dataset_wrapper.get_seq_info())
mes += __g_info_separator + ' '
mes += __g_name_separator.join([str(x) for x in data_idx])
#mes += '\nNumber of removed samples: {:d}'.format(len(data_idx))
nii_display.f_eprint(mes)
return mes
def __save_sample_list_buf(list_buff, cache_path):
with open(cache_path, 'w') as file_ptr:
for data_str in list_buff:
file_ptr.write(data_str + '\n')
return
def __cache_name(path, cycle_idx):
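# e.g. (illustrative), __cache_name('al_cache', 3) returns 'al_cache_003.txt'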
return '{:s}_{:03d}.txt'.format(path, cycle_idx)
def __parse_sample_list(mes):
"""
parse a logged message, e.g.,
"Active learning cycle K, add samples: name1;name2: idx1;idx2"
->
(K, 'add', [idx1, idx2])
"""
# cycle index
cycle_id = re.findall("[0-9]+", mes.split(__g_info_separator)[0])
cycle_id = int(cycle_id[0])
# type of method
if re.findall(__g_type_tags[1], mes.split(__g_info_separator)[0]):
tag = __g_type_tags[1]
else:
tag = __g_type_tags[0]
# assume that : is not included in the file name
filepart = mes.split(__g_info_separator)[2]
# return the sample list
return cycle_id, tag, \
[int(x.rstrip().lstrip()) for x in filepart.split(__g_name_separator)]
def __load_cached_data_list_file(cache_path):
with open(cache_path, 'r') as file_ptr:
output = [__parse_sample_list(x) for x in file_ptr]
return output
def __print_cycle(cycle_idx, train_s, pool_s):
""" print information added to the error log
"""
return "AL cycle {:d}, {:d}, {:d}".format(cycle_idx, train_s, pool_s)
def __print_AL_info(num_al_cycle, epoch_per_cycle, num_sample_al_cycle, args):
""" print head information to summarize the AL settings
"""
mes = "\nActive learning (pool-based) settings:"
nii_display.f_print(mes)
mes = 'Number of active learning cycle: {:d}'.format(num_al_cycle)
mes += '\nNumber of epochs per cycle: {:d}'.format(epoch_per_cycle)
mes += '\nNumber of new samples per cycle: {:d}'.format(num_sample_al_cycle)
if args.active_learning_use_new_data_only:
mes += '\nUse retrieved data for fine-tuning model'
else:
mes += '\nUse seed + retrieved data for model training'
if args.active_learning_with_replacement:
mes += '\nRetrieve data w/ replacement'
else:
mes += '\nRetrieve data w/o replacement'
mes += '\n'
nii_display.f_print(mes, 'normal')
return
def _f_copy_subset(dataset_wrapper, data_idx):
""" return a deepcopy of dataset that contains data specified by data_idx
"""
# create data that contains selected data only
# the database only contains data index, so it is fast to do deepcopy
tmp_data_wrapper = copy.deepcopy(dataset_wrapper)
tmp_data_wrapper.manage_data(data_idx, 'keep')
return tmp_data_wrapper
def _f_add_data(pool_dataset_wrapper, train_dataset_wrapper, data_idx, args):
"""
"""
# create a copy of the data to be selected from the pool
# the database only contains data index, so it is fast to do deepcopy
tmp_data_wrapper = _f_copy_subset(pool_dataset_wrapper, data_idx)
# Delete data from original pool if we sample without replacement
if not args.active_learning_with_replacement:
pool_dataset_wrapper.manage_data(data_idx, 'delete')
#pool_data_loader = pool_dataset_wrapper.get_loader()
#
if args.active_learning_use_new_data_only:
# only augmented data
nii_display.f_die("Not implemented yet")
else:
# base dataset + augmented data
train_dataset_wrapper.add_dataset(tmp_data_wrapper)
return
def _f_remove_data(pool_dataset_wrapper, data_idx, args):
"""
"""
pool_dataset_wrapper.manage_data(data_idx, 'delete')
return
def f_train_wrapper(args, pt_model, loss_wrapper, device, \
optimizer_wrapper, \
train_dataset_wrapper, \
pool_dataset_wrapper, \
val_dataset_wrapper = None, \
checkpoint = None):
"""
f_train_wrapper(args, pt_model, loss_wrapper, device,
optimizer_wrapper
train_dataset_wrapper, val_dataset_wrapper = None,
check_point = None):
A wrapper to run the training process
Args:
args: argument information given by argparse
pt_model: pytorch model (torch.nn.Module)
loss_wrapper: a wrapper over loss function
loss_wrapper.compute(generated, target)
device: torch.device("cuda") or torch.device("cpu")
optimizer_wrapper:
a wrapper over optimizer (defined in op_manager.py)
optimizer_wrapper.optimizer is torch.optimizer
train_dataset_wrapper:
a wrapper over training data set
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
pool_dataset_wrapper:
a wrapper over pool data set for AL
train_dataset_wrapper.get_loader() returns torch.DataSetLoader
val_dataset_wrapper:
a wrapper over validation data set (data_io/default_data_io.py)
it can be None.
check_point:
a check_point that stores everything needed to resume training
"""
nii_display.f_print_w_date("Start model training")
##############
## Preparation
##############
##
# Configurations
##
# total number of epoch = epoch per cycle * number of AL cycles
total_epoch_num = optimizer_wrapper.get_epoch_num()
num_al_cycle = np.abs(args.active_learning_cycle_num)
if num_al_cycle == 0:
nii_display.f_die("Number of active learning cycles must be > 0")
epoch_per_cycle = total_epoch_num // num_al_cycle
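# illustrative example (values are hypothetical): 100 total epochs with
# 5 active learning cycles gives epoch_per_cycle = 100 // 5 = 20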
# set the number of samples to take per cycle
num_sample_al_cycle = args.active_learning_new_sample_per_cycle
# if not set, take batch-size of samples per cycle
if num_sample_al_cycle < 1:
num_sample_al_cycle = args.batch_size
#nii_display.f_print("Add {:d} new samples per cycle".format(
# num_sample_al_cycle))
# patience for early stopping on development set
no_best_epoch_num = optimizer_wrapper.get_no_best_epoch_num()
##
# data loader, optimizer, model, ...
##
# get the optimizer
if not args.active_learning_pre_train_epoch_num:
# this information should have been printed during pre-training
optimizer_wrapper.print_info()
optimizer = optimizer_wrapper.optimizer
lr_scheduler = optimizer_wrapper.lr_scheduler
# get data loader for seed training set
if not args.active_learning_pre_train_epoch_num:
train_dataset_wrapper.print_info()
train_data_loader = train_dataset_wrapper.get_loader()
train_seq_num = train_dataset_wrapper.get_seq_num()
# get pool data set for active learning
pool_dataset_wrapper.print_info()
pool_data_loader = pool_dataset_wrapper.get_loader()
pool_seq_num = pool_dataset_wrapper.get_seq_num()
# get the training process monitor
monitor_trn = nii_monitor.Monitor(total_epoch_num, train_seq_num)
# if validation data is provided, get data loader for val set
if val_dataset_wrapper is not None:
if not args.active_learning_pre_train_epoch_num:
val_dataset_wrapper.print_info()
val_data_loader = val_dataset_wrapper.get_loader()
val_seq_num = val_dataset_wrapper.get_seq_num()
monitor_val = nii_monitor.Monitor(total_epoch_num, val_seq_num)
else:
monitor_val = None
# prepare for DataParallism if available
# pytorch.org/tutorials/beginner/blitz/data_parallel_tutorial.html
if torch.cuda.device_count() > 1 and args.multi_gpu_data_parallel:
nii_display.f_die("Not implemented for multiple GPU")
else:
nii_display.f_print("\nUse single GPU: %s\n" % \
(torch.cuda.get_device_name(device)))
flag_multi_device = False
normtarget_f = None
pt_model.to(device, dtype=nii_dconf.d_dtype)
##
# misc
##
# print the network
if not args.active_learning_pre_train_epoch_num:
nii_nn_tools.f_model_show(pt_model)
nii_nn_tools.f_loss_show(loss_wrapper)
# key names, used when saving *.epoch.pt
cp_names = nii_nn_manage_conf.CheckPointKey()
# training log information
train_log = ''
# buffer for selected data list
al_mes_buff = []
###############################
## Resume training if necessary
###############################
##
# load epoch*.pt (which contains no info on previously selected data)
##
train_log = nii_nn_tools.f_load_checkpoint(
checkpoint, args, flag_multi_device, pt_model,
optimizer, monitor_trn, monitor_val, lr_scheduler)
##
# load selected or removed utterances in previous cycles,
##
# index of the starting cycle (default 0)
start_cycle = 0
if len(args.active_learning_cache_dataname_path) \
and os.path.isfile(args.active_learning_cache_dataname_path):
nii_display.f_print("Load cache of selected (removed data)")
# Load from file
cached_data_status = __load_cached_data_list_file(
args.active_learning_cache_dataname_path)
# For each cycle, update the pool and training set
for entry in cached_data_status:
# retrieve the log
cycle_id, method_type, data_idx = entry[0], entry[1], entry[2]
if method_type == __g_type_tags[1]:
# for removing
# print the information
mes = __print_excl_sample_list(
cycle_id, _f_copy_subset(pool_dataset_wrapper, data_idx),
data_idx)
# remove previously removed data from pool
_f_remove_data(pool_dataset_wrapper, data_idx, args)
else:
# for selected data (either active or passive)
# print the information
mes = __print_new_sample_list(
cycle_id, _f_copy_subset(pool_dataset_wrapper, data_idx),
data_idx)
# add previously selected data from the pool to the training set
_f_add_data(pool_dataset_wrapper, train_dataset_wrapper,
data_idx, args)
#
pool_data_loader = pool_dataset_wrapper.get_loader()
train_data_loader = train_dataset_wrapper.get_loader()
#
al_mes_buff.append(mes)
if len(cached_data_status):
# data selection and removal should have been done in cycle_id
# thus, we start from the next cycle
start_cycle = cycle_id + 1
else:
pass
######################
### User defined setup
######################
if hasattr(pt_model, "other_setups"):
nii_display.f_print("Conduct User-defined setup")
pt_model.other_setups()
# This should be merged with other_setups
if hasattr(pt_model, "g_pretrained_model_path") and \
hasattr(pt_model, "g_pretrained_model_prefix"):
nii_display.f_print("Load pret-rained models as part of this mode")
nii_nn_tools.f_load_pretrained_model_partially(
pt_model, pt_model.g_pretrained_model_path,
pt_model.g_pretrained_model_prefix)
##############################
### Start active learning loop
##############################
# other variables
# initialize flag to save state of early stopping
flag_early_stopped = False
# initialize the starting epoch number
# this counts the epoch number across different cycles
start_epoch = monitor_trn.get_epoch()
epoch_counter = start_epoch
# get the total number of epochs to run
total_epoch_num = monitor_trn.get_max_epoch()
# a buf to store the path of trained models per cycle
saved_model_path_buf = []
# print general active learning information
__print_AL_info(num_al_cycle, epoch_per_cycle, num_sample_al_cycle, args)
# print training log (if available from resumed checkpoint)
_ = nii_op_display_tk.print_log_head()
nii_display.f_print_message(train_log, flush=True, end='')
# sanity check
if start_epoch // epoch_per_cycle != start_cycle:
nii_display.f_print("Training cycle in {:s} != that in {:s}".format(
args.trained_model, args.active_learning_cache_dataname_path))
nii_display.f_print(" {:d} // {:d} != {:d}".format(
start_epoch, epoch_per_cycle, start_cycle))
nii_display.f_die("Fail to resume training")
#
# currently, we can only restart from the 1st epoch in each active learning
# cycle. Note that, monitor_trn.get_epoch() returns the current epoch idx
if start_epoch > 0 and start_epoch % epoch_per_cycle != 0:
mes = "The checkpoint is not the last epoch in one cycle"
nii_display.f_print(mes)
nii_display.f_die("Fail to resume training")
# loop over cycles
for cycle_idx in range(start_cycle, num_al_cycle):
# Pool data has been used up. Training ends.
if pool_dataset_wrapper.get_seq_num() < 1:
break
########
# select the samples
########
# There are many methods to select samples
# we require pt_model to define one of the methods
# I. Pool-based, no knowledge of seed data:
# al_retrieve_data(pool_data_loader,
# num_sample_al_cycle)
# Only use model to score each data in pool_data_loader
#
# II. Pool-based, w/ knowledge of seed data
# al_retrieve_data_knowing_train(train_data_loader,
# pool_data_loader,
# num_sample_al_cycle)
# Select samples from the pool given knowledge of the seed training data
#
# III. Pool-based, but to exclude data first
# al_exclude_data(train_data_loader,pool_data_loader,
# num_sample_al_cycle)
# Exclude samples from the pool set
# If provided, this function will be called first before executing
# Pool-based II and I
#
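# An illustrative sketch (an assumption, not part of this file) of a
# model-side method matching interface I above, assuming batchsize = 1
# and a hypothetical per-sample scoring function self.score_utility:
# def al_retrieve_data(self, pool_data_loader, num_sample):
#     scores, indices = [], []
#     with torch.no_grad():
#         for _, (data_in, _, _, idx_orig) in enumerate(pool_data_loader):
#             scores.append(float(self.score_utility(data_in)))
#             indices.append(int(idx_orig[0]))
#     # return the indices of the num_sample highest-scoring utterances
#     order = np.argsort(scores)[::-1][:num_sample]
#     return [indices[i] for i in order]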
# save current model flag
tmp_train_flag = pt_model.training
if args.active_learning_train_model_for_retrieval:
pt_model.train()
else:
pt_model.eval()
# exclude data if necessary
if hasattr(pt_model, 'al_exclude_data'):
# select least useful data
data_idx = pt_model.al_exclude_data(
pool_data_loader, num_sample_al_cycle)
# convert data index to int
data_idx = [int(x) for x in data_idx]
# print data to be excluded
mes = __print_excl_sample_list(
cycle_idx, _f_copy_subset(pool_dataset_wrapper, data_idx),
data_idx)
al_mes_buff.append(mes)
# remove the data from the pool
_f_remove_data(pool_dataset_wrapper, data_idx, args)
pool_data_loader = pool_dataset_wrapper.get_loader()
# retrieve data from the pool to training set
if hasattr(pt_model, 'al_retrieve_data'):
data_idx = pt_model.al_retrieve_data(
pool_data_loader, num_sample_al_cycle)
elif hasattr(pt_model, 'al_retrieve_data_knowing_train'):
data_idx = pt_model.al_retrieve_data_knowing_train(
train_data_loader, pool_data_loader, num_sample_al_cycle)
else:
nii_display.f_die("model must define al_retrieve_data")
# convert data index to int
data_idx = [int(x) for x in data_idx]
# set flag back
if tmp_train_flag:
pt_model.train()
else:
pt_model.eval()
########
# Create a dataset wrapper for the new data and add it to the training set
########
# _f_add_data alters pool_dataset_wrapper
# thus, we print the list based on pool_dataset before _f_add_data
mes = __print_new_sample_list(
cycle_idx, _f_copy_subset(pool_dataset_wrapper, data_idx), data_idx)
al_mes_buff.append(mes)
_f_add_data(pool_dataset_wrapper, train_dataset_wrapper, data_idx, args)
# prepare for training
# get the data loader from the new training and pool sets
pool_data_loader = pool_dataset_wrapper.get_loader()
train_data_loader = train_dataset_wrapper.get_loader()
# number of samples in current training and pool sets
train_seq_num = train_dataset_wrapper.get_seq_num()
pool_seq_num = pool_dataset_wrapper.get_seq_num()
# because the number of training data changes in each cycle, we need to
# create a temporary monitor for training
tmp_monitor_trn = nii_monitor.Monitor(epoch_per_cycle, train_seq_num)
########
# training using the new training set
# epoch_counter: a global counter of training epoch, across all cycles
# tmp_start_epoch: index of starting epoch within one cycle
# tmp_epoch_idx: index of epoch within one cycle
tmp_start_epoch = epoch_counter % epoch_per_cycle
for tmp_epoch_idx in range(tmp_start_epoch, epoch_per_cycle):
# If the model has a member for g_epoch_idx
# save the index
if hasattr(pt_model, 'g_epoch_idx'):
pt_model.g_epoch_idx = epoch_counter
# If the model has a member for g_cycle_idx
# save the cycle index
# cycle index should be updated after selecting the data
if hasattr(pt_model, 'g_cycle_idx'):
pt_model.g_cycle_idx = cycle_idx
# training one epoch
pt_model.train()
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = False
nii_nn_manager_base.f_run_one_epoch(
args, pt_model, loss_wrapper, device, \
tmp_monitor_trn, train_data_loader, \
tmp_epoch_idx, optimizer, normtarget_f)
# get the time and loss for this epoch
time_trn = tmp_monitor_trn.get_time(tmp_epoch_idx)
loss_trn = tmp_monitor_trn.get_loss(tmp_epoch_idx)
# if necessary, forward pass on development set
if val_dataset_wrapper is not None:
# set eval() if necessary
if args.eval_mode_for_validation:
pt_model.eval()
if hasattr(pt_model, 'flag_validation'):
pt_model.flag_validation = True
with torch.no_grad():
nii_nn_manager_base.f_run_one_epoch(
args, pt_model, loss_wrapper, \
device, monitor_val, val_data_loader, \
epoch_counter, None, normtarget_f)
time_val = monitor_val.get_time(epoch_counter)
loss_val = monitor_val.get_loss(epoch_counter)
# update lr rate scheduler if necessary
if lr_scheduler.f_valid():
lr_scheduler.f_step(loss_val)
else:
time_val = monitor_val.get_time(epoch_counter)
loss_val = monitor_val.get_loss(epoch_counter)
#time_val, loss_val = 0, 0
            # whether this is the new best trained epoch
if val_dataset_wrapper is not None:
flag_new_best = monitor_val.is_new_best()
else:
flag_new_best = True
# print information
info_mes = [optimizer_wrapper.get_lr_info(),
__print_cycle(cycle_idx, train_seq_num, pool_seq_num)]
train_log += nii_op_display_tk.print_train_info(
tmp_epoch_idx, time_trn, loss_trn, time_val, loss_val,
flag_new_best, ', '.join([x for x in info_mes if x]))
# save the best model if necessary
if flag_new_best or args.force_save_lite_trained_network_per_epoch:
tmp_best_name = nii_nn_tools.f_save_trained_name(args)
torch.save(pt_model.state_dict(), tmp_best_name)
# save intermediate model if necessary
            # we only save the last epoch in each cycle
if not args.not_save_each_epoch \
and tmp_epoch_idx == (epoch_per_cycle - 1):
# we save the global epoch counter into the checkpoint
monitor_trn.log_epoch(epoch_counter)
# name of the checkpoint
tmp_model_name = nii_nn_tools.f_save_epoch_name(
args, cycle_idx, '_epoch_{:03d}'.format(tmp_epoch_idx),
'_al_cycle')
if monitor_val is not None:
tmp_val_log = monitor_val.get_state_dic()
else:
tmp_val_log = None
if lr_scheduler.f_valid():
lr_scheduler_state = lr_scheduler.f_state_dict()
else:
lr_scheduler_state = None
# save
tmp_dic = {
cp_names.state_dict : pt_model.state_dict(),
cp_names.info : train_log,
cp_names.optimizer : optimizer.state_dict(),
cp_names.trnlog : monitor_trn.get_state_dic(),
cp_names.vallog : tmp_val_log,
cp_names.lr_scheduler : lr_scheduler_state
}
torch.save(tmp_dic, tmp_model_name)
if args.verbose == 1:
nii_display.f_eprint(str(datetime.datetime.now()))
nii_display.f_eprint("Save {:s}".format(tmp_model_name),
flush=True)
#
epoch_counter += 1
# loop done for epoch per cycle
# always save the trained model for each cycle
suffix = '_al_cycle_{:03d}'.format(cycle_idx)
tmp_best_name = nii_nn_tools.f_save_trained_name(args, suffix)
torch.save(pt_model.state_dict(), tmp_best_name)
saved_model_path_buf.append(tmp_best_name)
# save selected data for each cycle
__save_sample_list_buf(
al_mes_buff,
__cache_name(args.active_learning_cache_dataname_save, cycle_idx))
# loop for AL cycle
nii_op_display_tk.print_log_tail()
nii_display.f_print("Training finished")
nii_display.f_print("Models from each cycle are saved to:")
for path in saved_model_path_buf:
nii_display.f_print("{}".format(path), 'normal')
return
if __name__ == "__main__":
print("nn_manager_AL")
| 25,279 | 38.254658 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/core_scripts/config_parse/arg_parse.py | #!/usr/bin/env python
"""
config_parse
Argument parse
"""
from __future__ import absolute_import
import os
import sys
import argparse
import core_scripts.other_tools.list_tools as nii_list_tools
import core_scripts.other_tools.display as nii_display
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#############################################################
# argparser
#
def f_args_parsed(argument_input = None):
""" Arg_parse
"""
parser = argparse.ArgumentParser(
description='General argument parse')
######
# lib
mes = 'module of model definition (default model, model.py will be loaded)'
parser.add_argument('--module-model', type=str, default="model", help=mes)
mes = 'module of configuration (default config, config.py will be loaded)'
parser.add_argument('--module-config', type=str, default="config",
help=mes)
mes = 'module of auxiliary model definition (in case this is needed)'
parser.add_argument('--module-model-aux', type=str, default="", help=mes)
######
# Training settings
mes = 'batch size for training/inference (default: 1)'
parser.add_argument('--batch-size', type=int, default=1, help=mes)
mes = 'number of mini-batches to accumulate (default: 1)'
parser.add_argument('--size-accumulate-grad', type=int, default=1, help=mes)
mes = 'number of epochs to train (default: 50)'
parser.add_argument('--epochs', type=int, default=50, help=mes)
mes = 'number of no-best epochs for early stopping (default: 5)'
parser.add_argument('--no-best-epochs', type=int, default=5, help=mes)
mes = 'force to save trained-network.pt per epoch, '
mes += 'no matter whether the epoch is currently the best.'
parser.add_argument('--force-save-lite-trained-network-per-epoch',
action='store_true', default=False, help=mes)
mes = 'sampler (default: None). Default sampler is random shuffler. '
mes += 'Option 1: block_shuffle_by_length, shuffle data by length'
parser.add_argument('--sampler', type=str, default='None', help=mes)
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate (default: 0.0001)')
mes = 'learning rate decaying factor, using '
mes += 'torch.optim.lr_scheduler.ReduceLROnPlateau(patience=no-best-epochs,'
mes += ' factor=lr-decay-factor). By default, no decaying is used.'
mes += ' Training stopped after --no-best-epochs.'
parser.add_argument('--lr-decay-factor', type=float, default=-1.0, help=mes)
mes = 'lr scheduler: 0: ReduceLROnPlateau (default); 1: StepLR; '
mes += 'this option is set on only when --lr-decay-factor > 0. '
mes += 'Please check core_scripts/op_manager/lr_scheduler.py '
mes += 'for detailed hyper config for each type of lr scheduler'
parser.add_argument('--lr-scheduler-type', type=int, default=0, help=mes)
mes = 'lr patience: patience for torch_optim_steplr.ReduceLROnPlateau '
mes += 'this option is used only when --lr-scheduler-type == 0. '
parser.add_argument('--lr-patience', type=int, default=5, help=mes)
mes = 'lr step size: step size for torch.optim.lr_scheduler.StepLR'
mes += 'this option is used only when --lr-scheduler-type == 1. '
parser.add_argument('--lr-steplr-size', type=int, default=5, help=mes)
mes = 'L2 penalty on weight (default: not use). '
mes += 'It corresponds to the weight_decay option in Adam'
parser.add_argument('--l2-penalty', type=float, default=-1.0, help=mes)
mes = 'gradient norm (torch.nn.utils.clip_grad_norm_ of Pytorch)'
mes += 'default (-1, not use)'
parser.add_argument('--grad-clip-norm', type=float, default=-1.0,
help=mes)
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1,
help='random seed (default: 1)')
mes = 'turn model.eval() on validation set (default: false)'
parser.add_argument('--eval-mode-for-validation', \
action='store_true', default=False, help=mes)
mes = 'if model.forward(input, target), please set this option on. '
mes += 'This is used for autoregressive model, auto-encoder, and so on. '
mes += 'When --model-forward-with-file-name is also on, '
mes += 'model.forward(input, target, file_name) should be defined'
parser.add_argument('--model-forward-with-target', \
action='store_true', default=False, help=mes)
mes = 'if model.forward(input, file_name), please set option on. '
mes += 'This is used with forward requires file name of the data. '
mes += 'When --model-forward-with-target is also on, '
mes += 'model.forward(input, target, file_name) should be defined'
parser.add_argument('--model-forward-with-file-name', \
action='store_true', default=False, help=mes)
    mes = 'shuffle data? (default true). Setting --shuffle will turn off shuffling'
parser.add_argument('--shuffle', action='store_false', \
default=True, help=mes)
mes = 'number of parallel workers to load data (default: 0)'
parser.add_argument('--num-workers', type=int, default=0, help=mes)
    mes = 'use DataParallel to leverage multiple GPUs (default: False)'
parser.add_argument('--multi-gpu-data-parallel', \
action='store_true', default=False, help=mes)
mes = 'way to concatenate multiple datasets: '
mes += 'concatenate: simply merge two datasets as one large dataset. '
mes += 'batch_merge: make a minibatch by drawing one sample from each set. '
mes += '(default: concatenate)'
parser.add_argument('--way-to-merge-datasets', type=str, \
default='concatenate', help=mes)
mes = "Ignore invalid data? the length of features does not match"
parser.add_argument('--ignore-length-invalid-data',
action='store_true', default=False, help=mes)
mes = "Ignore existing cache file dic"
parser.add_argument('--ignore-cached-file-infor',
action='store_true', default=False, help=mes)
mes = "External directory to store cache file dic"
parser.add_argument('--path-cache-file', type=str, default="", help=mes)
mes = "Skip scanning data directories (by default False)"
parser.add_argument('--force-skip-datadir-scanning',
action='store_true', default=False, help=mes)
######
# options to save model / checkpoint
parser.add_argument('--save-model-dir', type=str, \
default="./", \
                        help='save model to this directory (default ./)')
mes = 'do not save model after every epoch (default: False)'
parser.add_argument('--not-save-each-epoch', action='store_true', \
default=False, help=mes)
mes = 'name prefix of saved model (default: epoch)'
parser.add_argument('--save-epoch-name', type=str, default="epoch", \
help=mes)
mes = 'name of trained model (default: trained_network)'
parser.add_argument('--save-trained-name', type=str, \
default="trained_network", help=mes)
parser.add_argument('--save-model-ext', type=str, default=".pt",
help='extension name of model (default: .pt)')
mes = 'save model after every N mini-batches (default: 0, not use)'
parser.add_argument('--save-model-every-n-minibatches', type=int,
default=0, help=mes)
#######
# options for active learning
mes = 'Number of active leaning cycles'
parser.add_argument('--active-learning-cycle-num', type=int, default=0,
help = mes)
    mes = 'Whether to use the base training set with new samples? (default True)'
parser.add_argument('--active-learning-use-new-data-only',
action='store_true', default=False, help = mes)
    mes = 'Number of samples selected per cycle (default = batch size)'
parser.add_argument('--active-learning-new-sample-per-cycle', type=int,
default=0, help = mes)
    mes = 'Use model.train() during data retrieval (default False)'
parser.add_argument('--active-learning-train-model-for-retrieval',
action='store_true', default=False, help = mes)
    mes = 'Retrieve data with replacement (default True)'
parser.add_argument('--active-learning-with-replacement',
action='store_true', default=False, help = mes)
    mes = 'Number of pre-training epochs before active learning (default 0)'
parser.add_argument('--active-learning-pre-train-epoch-num', type=int,
default=0, help=mes)
mes = 'Name of the cache file to store names of selected or removed data'
parser.add_argument('--active-learning-cache-dataname-save', type=str,
default="cache_al_data_log", help=mes)
mes = 'Path to the cache file that stores names of selected or removed data'
parser.add_argument('--active-learning-cache-dataname-path', type=str,
default="", help=mes)
#######
# options to load model
mes = 'a trained model for inference or resume training '
parser.add_argument('--trained-model', type=str, \
default="", help=mes + "(default: '')")
mes = 'do not load previous training error information.'
mes += " Load only model para. and optimizer state (default: false)"
parser.add_argument('--ignore-training-history-in-trained-model',
action='store_true', \
default=False, help=mes)
mes = 'do not load previous training statistics in optimizer.'
mes += " (default: false)"
parser.add_argument('--ignore-optimizer-statistics-in-trained-model',
action='store_true', \
default=False, help=mes)
mes = 'load pre-trained model even if there is mismatch on the number of'
mes += " parameters. Mismatched part will not be loaded (default: false)"
parser.add_argument('--allow-mismatched-pretrained-model',
action='store_true', \
default=False, help=mes)
mes = 'run inference mode (default: False, run training script)'
parser.add_argument('--inference', action='store_true', \
default=False, help=mes)
mes = 'run model conversion script (default: False)'
parser.add_argument('--epoch2pt', action='store_true', \
default=False, help=mes)
mes = 'inference only on data whose minibatch index is within the range of '
mes = mes + '[--inference-sample-start-index, --inference-sample-end-index)'
mes = mes + 'default: 0, starting from the 1st data'
parser.add_argument('--inference-sample-start-index', type=int, default=0,
help=mes)
mes = 'inference only on data whose minibatch index is within the range of '
mes = mes + '[--inference-sample-start-index, --inference-sample-end-index)'
mes = mes + 'default: -1, until the end of all data'
parser.add_argument('--inference-sample-end-index', type=int, default=-1,
help=mes)
mes = 'inference data list. A list of file names that should '
mes = mes + 'be processed during the inference stage. '
mes = mes + 'If such a data list is provided, only data listed will '
mes = mes + 'be processed.'
parser.add_argument('--inference-data-list', type=str, default="",
help=mes)
#######
# options to output
mes = 'path to save generated data (default: ./output)'
parser.add_argument('--output-dir', type=str, default="./output", \
help=mes)
# options to output
mes = 'prefix added to file name (default: no string)'
parser.add_argument('--output-filename-prefix', type=str, default="", \
help=mes)
mes = 'truncate input data sequences so that the max length < N.'
mes += ' (default: -1, not do truncating at all)'
parser.add_argument('--trunc-input-length-for-inference', type=int,
default=-1, help=mes)
mes = 'truncate input data overlap length (default: 5)'
parser.add_argument('--trunc-input-overlap', type=int, default=5, help=mes)
mes = 'which optimizer to use (Adam | SGD, default: Adam)'
parser.add_argument('--optimizer', type=str, default='Adam', help=mes)
mes = 'verbose level 0: nothing; 1: print error per utterance'
mes = mes + ' (default: 1)'
parser.add_argument('--verbose', type=int, default=1,
help=mes)
#######
# options for debug mode
mes = 'debug mode, each epoch only uses a specified number of mini-batches'
mes += ' (default: 0, not used)'
parser.add_argument('--debug-batch-num', type=int, default=0, help=mes)
#######
# options for user defined
mes = 'a temporary flag without specific purpose.'
mes += 'User should define args.temp_flag only for temporary usage.'
parser.add_argument('--temp-flag', type=str, default='', help=mes)
mes = 'reverse the order when loading data from the dataset.'
    mes += 'This should not be used if --sampler block_shuffle_by_length '
parser.add_argument('--flag-reverse-data-loading-order',
action='store_true', default=False, help=mes)
#######
# backend options
parser.add_argument('--cudnn-deterministic-toggle', action='store_false', \
default=True,
help='use cudnn-deterministic? (default true)')
parser.add_argument('--cudnn-benchmark-toggle', action='store_true', \
default=False,
help='use cudnn-benchmark? (default false)')
#######
# profile options
mes = "options to setup Pytorch profile. It must be a string like A-B-C-D"
    mes += ' where A, B, C, D are integers. The meaning of these options is in'
mes += ' torch.profiler.schedule. Default 1-1-3-2.'
parser.add_argument('--wait-warmup-active-repeat', type=str,
default='1-1-3-2',
help=mes)
mes = "directory to save profiling output. Default ./log_profile"
parser.add_argument('--profile-output-dir', type=str,
default='./log_profile')
#######
# data options
mes = 'option to set silence_handler on waveform data.\n'
mes += ' 0: do nothing, use the data as it is (default) \n'
mes += ' 1: remove segments with small energy, use other segments\n'
mes += ' 2: keep only segments with small energy, remove other segments\n'
    mes += ' 3: remove segments with small energy only at beginning and end\n'
mes += 'Code in core_scripts.data_io.wav_tools.silence_handler. '
mes += 'This option is used when input or output contains only waveform. '
mes += 'It only processes waveform. Other features will not be trimmed.'
parser.add_argument('--opt-wav-silence-handler', type=int,
default=0, help=mes)
mes = 'update data length in internal buffer if data length is changed '
mes += 'by augmentation method. This is useful, for example, when using '
mes += '--sampler block_shuffle_by_length --opt-wav-silence-handler 3 '
mes += 'or using other data augmentation method changes data length.'
parser.add_argument('--force-update-seq-length', action='store_true', \
default=False, help=mes)
#
# done
if argument_input is not None:
return parser.parse_args(argument_input)
else:
return parser.parse_args()
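# A minimal, hedged usage sketch (not part of the original file): f_args_parsed()
# parses sys.argv by default; a list of strings can be passed for testing.
#
#   from core_scripts.config_parse.arg_parse import f_args_parsed
#   args = f_args_parsed(['--batch-size', '8', '--epochs', '10', '--no-cuda'])
#   print(args.batch_size, args.epochs, args.no_cuda)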
if __name__ == "__main__":
pass
| 16,184 | 41.704485 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/util_loss_metric.py | #!/usr/bin/env python
"""
util_loss_metric
Loss functions or metrics
References
[1] Weitang Liu, Xiaoyun Wang, John Owens, and Yixuan Li.
Energy-Based Out-of-Distribution Detection.
In Proc. NIPS, 33:21464–21475. 2020.
[2] Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna,
Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan
Supervised Contrastive Learning. Proc.NIPS. 2020.
[3] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz.
Mixup: Beyond Empirical Risk Minimization. In Proc. ICLR. 2018.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#####################
# negative energy [1]
#####################
def neg_energy(logits, temperature=1):
""" neg_eng = neg_energy(logits, temperature=1)
neg_eng[x] = -T \log \sum_y \exp (logits[x, y] / T)
See [1]
input
-----
logits: tensor, shape (batch, dim)
temperature: float, temperature hyperparameter
output
------
neg_eng: tensor, shape (batch,)
"""
eng = - temperature * torch.logsumexp(logits / temperature, dim=1)
return eng
def neg_energy_reg_loss(energy, margin_in, margin_out, flag_in):
""" loss = neg_energy_reg_loss(energy, margin_in, margin_out, flag_in)
See [1] eqs.(8-9)
input
-----
energy: tensor, any shape is OK
margin_in: float, margin for the in-dist. data
margin_out: float, margin for the out-dist. data
flag_in: bool, if the input data is in-dist. data
output
------
loss: scalar
"""
if flag_in:
loss = torch.pow(torch_nn_func.relu(energy - margin_in), 2).mean()
else:
loss = torch.pow(torch_nn_func.relu(margin_out - energy), 2).mean()
return loss
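# A hedged usage sketch (not part of the original code) of how the two
# functions above might be combined for energy-based OOD regularization,
# following [1]; `model`, `x_in`, `x_out`, and the margin values (-25, -7)
# are illustrative assumptions.
#
#   logits_in = model(x_in)       # in-distribution mini-batch
#   logits_out = model(x_out)     # out-of-distribution mini-batch
#   loss_reg = neg_energy_reg_loss(neg_energy(logits_in), -25., -7., True) \
#            + neg_energy_reg_loss(neg_energy(logits_out), -25., -7., False)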
#####################
# supervised contrastive loss [2]
#####################
def supcon_loss(input_feat,
labels = None, mask = None, sim_metric = None,
t=0.07, contra_mode='all', length_norm=False):
"""
loss = SupConLoss(feat,
labels = None, mask = None, sim_metric = None,
t=0.07, contra_mode='all')
input
-----
feat: tensor, feature vectors z [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
sim_metric: func, function to measure the similarity between two
feature vectors
t: float, temperature
contra_mode: str, default 'all'
'all': use all data in class i as anchors
'one': use 1st data in class i as anchors
length_norm: bool, default False
if True, l2 normalize feat along the last dimension
output
------
A loss scalar.
Based on https://github.com/HobbitLong/SupContrast/blob/master/losses.py
Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
Example:
feature = torch.rand([16, 2, 1000], dtype=torch.float32)
feature = torch_nn_func.normalize(feature, dim=-1)
label = torch.tensor([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 1, 1, 1, 1, 1],
dtype=torch.long)
loss = supcon_loss(feature, labels=label)
"""
if length_norm:
feat = torch_nn_func.normalize(input_feat, dim=-1)
else:
feat = input_feat
# batch size
bs = feat.shape[0]
# device
dc = feat.device
# dtype
dt = feat.dtype
# number of view
nv = feat.shape[1]
# get the mask
# mask[i][:] indicates the data that has the same class label as data i
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(bs, dtype=dt, device=dc)
elif labels is not None:
labels = labels.view(-1, 1)
if labels.shape[0] != bs:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).type(dt).to(dc)
else:
mask = mask.type(dt).to(dc)
# prepare feature matrix
# -> (num_view * batch, feature_dim, ...)
contrast_feature = torch.cat(torch.unbind(feat, dim=1), dim=0)
#
if contra_mode == 'one':
# (batch, feat_dim, ...)
anchor_feature = feat[:, 0]
anchor_count = 1
elif contra_mode == 'all':
anchor_feature = contrast_feature
anchor_count = nv
else:
        raise ValueError('Unknown mode: {}'.format(contra_mode))
# compute logits
# logits_mat is a matrix of size [num_view * batch, num_view * batch]
# or [batch, num_view * batch]
if sim_metric is not None:
logits_mat = torch.div(
sim_metric(anchor_feature, contrast_feature), t)
else:
logits_mat = torch.div(
torch.matmul(anchor_feature, contrast_feature.T), t)
# mask based on the label
# -> same shape as logits_mat
mask_ = mask.repeat(anchor_count, nv)
    # mask to exclude each data itself
self_mask = torch.scatter(
torch.ones_like(mask_), 1,
torch.arange(bs * anchor_count).view(-1, 1).to(dc),
0)
#
mask_ = mask_ * self_mask
    # for numerical stability, remove the max from logits
    # see the LogSumExp trick: https://en.wikipedia.org/wiki/LogSumExp
logits_max, _ = torch.max(logits_mat * self_mask, dim=1, keepdim=True)
logits_mat_ = logits_mat - logits_max.detach()
# compute log_prob
exp_logits = torch.exp(logits_mat_ * self_mask) * self_mask
log_prob = logits_mat_ - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask_ * log_prob).sum(1) / mask_.sum(1)
# loss
loss = - mean_log_prob_pos
loss = loss.view(anchor_count, bs).mean()
return loss
################
# Mixup
################
class MixUpCE(torch_nn.Module):
def __init__(self, weight = None):
super(MixUpCE, self).__init__()
self.m_loss1 = torch_nn.CrossEntropyLoss(weight=weight,reduction='none')
self.m_loss2 = torch_nn.CrossEntropyLoss(weight=weight,reduction='none')
return
def forward(self, logits, y1, y2=None, gammas=None):
""" loss = MixUpCE.forward(logits, y1, y2, gammas)
This API computes the mixup cross-entropy.
Logits is assumed to be f( gammas * x1 + (1-gammas) * x2).
Thus, this API only compute the CE:
gammas * Loss(logits, y1) + (1 - gammas) * Loss(logits, y2)
Note that if y2 and gammas are None, it uses common CE
input
-----
logits: tensor, (batch, dim)
y1: tensor, (batch, )
y2: tensor, (batch, )
gammas: tensor, (batch, )
output
------
loss: scalar
"""
if y2 is None and gammas is None:
loss_val = self.m_loss1(logits, y1)
else:
loss_val = gammas * self.m_loss1(logits, y1)
loss_val += (1-gammas) * self.m_loss2(logits, y2)
return loss_val.mean()
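# A hedged usage sketch (not part of the original code). MixUpCE expects the
# logits to be computed on mixed inputs; `model`, x1, x2, y1, y2 below are
# illustrative assumptions.
#
#   gammas = torch.rand(x1.shape[0])                        # mixing weights
#   x_mix = gammas.view(-1, 1) * x1 + (1 - gammas).view(-1, 1) * x2
#   loss = MixUpCE()(model(x_mix), y1, y2, gammas)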
#####################
# Distillation related
#####################
def kld_distill(logits_s, logits_t, temp=20):
""" KLD-based distillation loss
input
-----
logits_s: tensor, (batch, ..., dim), student output logits
where dim is #. output categories
logits_t: tensor, (batch, ..., dim), teacher output logits
temp: float, temperature, default=20
output
------
loss: scalar
"""
KD_loss = torch_nn_func.kl_div(
torch_nn_func.log_softmax(logits_s / temp, dim = -1),
torch_nn_func.log_softmax(logits_t / temp, dim = -1),
reduction = 'batchmean',
log_target = True) * temp * temp
return KD_loss
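# A hedged usage sketch (not part of the original code); `student`, `teacher`,
# and `x` are illustrative assumptions, with the teacher typically frozen.
#
#   with torch.no_grad():
#       logits_t = teacher(x)
#   logits_s = student(x)
#   loss = kld_distill(logits_s, logits_t, temp=20)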
#####################
# Rank consistency
#####################
def rank_consistency(x, metric = None, anchor = None, diff_mat = None):
"""loss = rank_consistency(x, metric)
input
-----
x: tensor, (batch, D1, D2 ...)
metric: a function or None
This function must be f(x1, x2) -> scalar
where x1 and x2 are in shape (D1, D2 ...)
if None, negative cosine similarity for
x1 and x2 of shape (D1, )
anchor: tensor, (batch, D1, D2, ...), as anchor
or None
If None, one of difference vector in the
matrix will be selected as anchor
diff_mat: tensor, (batch, batch, D1, D2 ...)
of None
if diff_mat is provided, x will be ignored
output
------
loss: scalar, loss value
Example
-------
>> x = torch.randn(4, 10)
>> x[1] = x[0] + 1.0
>> x[2] = x[0] + 2.0
>> x[3] = x[0] + 3.0
>> rank_consistency(x)
tensor(-1.)
"""
if diff_mat is None:
# (batch, batch, dim)
# diff_mat[i, j] = x[j] - x[i]
diff_mat = x - x.unsqueeze(1)
# batch size
bs = diff_mat.shape[0]
# loss to be accumulated
loss = 0.0
# metric
if metric is None:
# default use negative cosine_similarity
metric = lambda x1, x2: -torch_nn_func.cosine_similarity(x1, x2, dim=0)
#
if bs < 3:
return loss
# get anchor
if anchor is None:
# choose the diff_mat[1, 0] as the anchor
anchor_row_idx = 1
anchor_col_idx = 0
anchor = diff_mat[anchor_row_idx, anchor_col_idx]
else:
# anchor is provided externally
anchor_row_idx = -1
anchor_col_idx = -1
    # loop over the matrix, compare the off-diagonal elements
# with the anchor
count = 0.0
for col_idx in np.arange(bs-1):
for row_idx in np.arange(col_idx+1, bs):
if col_idx == anchor_col_idx and anchor_row_idx == row_idx:
continue
loss += metric(anchor, diff_mat[row_idx, col_idx])
count += 1
loss = loss / count
return loss
def rank_consistency_v2(x, metric = None, diff_mat = None):
"""loss = rank_consistency_v2(x, metric)
input
-----
x: tensor, (batch, D1, D2 ...)
metric: a function or None
This function must be f(x1, x2) -> scalar
where x1 and x2 are in shape (D1, D2 ...)
if None, negative cosine similarity for
x1 and x2 of shape (D1, )
diff_mat: tensor, (batch, batch, D1, D2 ...)
of None
if diff_mat is provided, x will be ignored
output
------
loss: scalar, loss value
Example
-------
>> x = torch.randn(4, 10)
>> x[1] = x[0] + 1.0
>> x[2] = x[0] + 2.0
>> x[3] = x[0] + 3.0
>> metric = lambda x1, x2: \
torch_nn_func.margin_ranking_loss(x1, x2, torch.ones_like(x1), 0.1)
    >> rank_consistency_v2(x, metric)
tensor(.0)
"""
if diff_mat is None:
# (batch, batch, dim)
# diff_mat[i, j] = x[j] - x[i]
diff_mat = x - x.unsqueeze(1)
# batch size
bs = diff_mat.shape[0]
# loss to be accumulated
loss = 0.0
# metric
if metric is None:
# default use margin_ranking_loss
metric = lambda x1, x2: torch_nn_func.margin_ranking_loss(
x1, x2, torch.ones_like(x1), 0.1)
#
if bs < 3:
return loss
count = 0.0
# loop over the matrix, column first
for col_idx in np.arange(bs-2):
for row_idx in np.arange(col_idx+2, bs):
# assume diff[i, j] should be diff[i-1, j]
loss += metric(diff_mat[row_idx-1, col_idx],
diff_mat[row_idx, col_idx])
count += 1
    # loop over the matrix, row first
for row_idx in np.arange(2, bs):
for col_idx in np.arange(1, row_idx):
# assume diff[i, j] should be diff[i, j-1]
loss += metric(diff_mat[row_idx, col_idx],
diff_mat[row_idx, col_idx-1])
count += 1
loss = loss / count
return loss
def rank_consistency_v3(x, metric = None):
"""loss = rank_consistency_v3(x, metric)
input
-----
x: tensor, (batch, D1, D2 ...)
metric: a function or None
This function must be f(x1, x2) -> scalar
where x1 and x2 are in shape (D1, D2 ...)
if None, negative cosine similarity for
x1 and x2 of shape (D1, )
output
------
loss: scalar, loss value
Example
-------
>> x = torch.randn(4, 10)
>> x[1] = x[0] + 1.0
>> x[2] = x[0] + 2.0
>> x[3] = x[0] + 3.0
>> metric = lambda x1, x2: \
torch_nn_func.margin_ranking_loss(x1, x2, torch.ones_like(x1), 0.1)
>> rank_consistency_v3(x, metric)
tensor(.0)
"""
# batch size
bs = x.shape[0]
# loss to be accumulated
loss = 0.0
# metric
if metric is None:
# default use margin_ranking_loss
# x1 should be ranked higher
metric = lambda x1, x2: torch_nn_func.margin_ranking_loss(
x1, x2, torch.ones_like(x1), 0.1)
#
if bs < 2:
return loss
count = 0.0
# loop over the rows
for row_idx1 in np.arange(1, bs):
for row_idx2 in np.arange(0, row_idx1):
loss += metric(x[row_idx1],
x[row_idx2])
count += 1
loss = loss / count
return loss
if __name__ == "__main__":
print("loss and metric")
| 14,085 | 27.456566 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_nn.py | ##!/usr/bin/env python
"""
Common blocks for neural networks
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
from scipy import signal as scipy_signal
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over dilated conv1D
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
We want to keep the length the same
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
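# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions. BLSTMLayer keeps the temporal length unchanged:
#
#   layer = BLSTMLayer(input_dim=80, output_dim=64)
#   x = torch.rand(1, 100, 80)        # (batch, length, input_dim)
#   y = layer(x)                      # (1, 100, 64)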
#
# 1D dilated convolution that keep the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
    Note: Tanh is applied by default (tanh=True)
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
# we may wrap other functions too
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
self.pad_mode = pad_mode
#
return
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
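# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions. Padding is chosen so output length == input length:
#
#   conv = Conv1dKeepLength(input_dim=64, output_dim=32, dilation_s=2,
#                           kernel_s=3, causal=True)
#   x = torch.rand(1, 200, 64)        # (batch, length, input_dim)
#   y = conv(x)                       # (1, 200, 32)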
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default: false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
    Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
        # and apply two moving-average smoothings
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
class upsampleByTransConv(torch_nn.Module):
"""upsampleByTransConv
Upsampling layer using transposed convolution
"""
def __init__(self, feat_dim, output_dim, upsample_rate, window_ratio=5):
"""upsampleByTransConv(feat_dim, upsample_rate, window_ratio=5)
Args
----
feat_dim: int, input feature should be (batch, length, feat_dim)
upsample_rate, int, output feature will be
(batch, length*upsample_rate, feat_dim)
window_ratio: int, default 5, window length of transconv will be
upsample_rate * window_ratio
"""
super(upsampleByTransConv, self).__init__()
window_l = upsample_rate * window_ratio
self.m_layer = torch_nn.ConvTranspose1d(
feat_dim, output_dim, window_l, stride=upsample_rate)
self.m_uprate = upsample_rate
return
def forward(self, x):
""" y = upsampleByTransConv(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length*upsample_rate, output_dim)
"""
l = x.shape[1] * self.m_uprate
y = self.m_layer(x.permute(0, 2, 1))[:, :, 0:l]
return y.permute(0, 2, 1).contiguous()
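# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions.
#
#   up = upsampleByTransConv(feat_dim=80, output_dim=80, upsample_rate=160)
#   x = torch.rand(2, 50, 80)         # (batch, length, feat_dim)
#   y = up(x)                         # (2, 50 * 160, 80)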
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize, signal_length, filter_order = K)
Signal: (batchsize, signal_length, 1)
For batch 0:
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
This method may be not efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
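# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions. Each time step n uses its own coefficients
# f_coef[:, n, :]:
#
#   filt = TimeVarFIRFilter()
#   sig = torch.rand(1, 1000, 1)      # (batch, length, 1)
#   coef = torch.rand(1, 1000, 31)    # (batch, length, filter_order)
#   out = filt(sig, coef)             # (1, 1000, 1)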
class SignalsConv1d(torch_nn.Module):
""" Filtering input signal with time invariant filter
Note: FIRFilter conducted filtering given fixed FIR weight
SignalsConv1d convolves two signals
Note: this is based on torch.nn.functional.conv1d
"""
def __init__(self):
super(SignalsConv1d, self).__init__()
def forward(self, signal, system_ir):
""" output = forward(signal, system_ir)
signal: (batchsize, length1, dim)
system_ir: (length2, dim)
output: (batchsize, length1, dim)
"""
if signal.shape[-1] != system_ir.shape[-1]:
print("Error: SignalsConv1d expects shape:")
print("signal (batchsize, length1, dim)")
print("system_id (batchsize, length2, dim)")
print("But received signal: {:s}".format(str(signal.shape)))
print(" system_ir: {:s}".format(str(system_ir.shape)))
sys.exit(1)
padding_length = system_ir.shape[0] - 1
groups = signal.shape[-1]
# pad signal on the left
signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1),\
(padding_length, 0))
# prepare system impulse response as (dim, 1, length2)
# also flip the impulse response
ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \
dims=[2])
        # convolve
output = torch_nn_func.conv1d(signal_pad, ir, groups=groups)
return output.permute(0, 2, 1)
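# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions. Each feature dimension is convolved with its own
# impulse response:
#
#   conv = SignalsConv1d()
#   sig = torch.rand(1, 1000, 2)      # (batch, length1, dim)
#   ir = torch.rand(64, 2)            # (length2, dim)
#   out = conv(sig, ir)               # (1, 1000, 2)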
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
        self.order = self.half_k * 2 + 1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
        For each step, n_index is [-(M-1)/2, ... 0, (M-1)/2]
where,
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0 ..., half_order]
x[:, :, self.half_order] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:])\
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
# temporary buffer of [-1^n] for gain norm in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
        # normalize the coef so that the gain at 0 / pi is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
class BatchNorm1DWrapper(torch_nn.BatchNorm1d):
"""
"""
def __init__(self, num_features, eps=1e-05, momentum=0.1,
affine=True, track_running_stats=True):
super(BatchNorm1DWrapper, self).__init__(
num_features, eps, momentum, affine, track_running_stats)
def forward(self, data):
output = super(BatchNorm1DWrapper, self).forward(data.permute(0, 2, 1))
return output.permute(0, 2, 1)
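# A hedged usage sketch (not part of the original code); the sizes below are
# illustrative assumptions.
#
#   bn = BatchNorm1DWrapper(num_features=64)
#   x = torch.rand(4, 100, 64)        # (batch, length, dim)
#   y = bn(x)                         # (4, 100, 64), normalized per feature dim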
class SignalFraming(torch_nn.Conv1d):
""" SignalFraming(w_len, h_len, w_type='Hamming')
Do framing on the signal. The implementation is based on conv1d
Args:
-----
w_len: window length (frame length)
h_len: hop length (frame shift)
w_type: type of window, (default='Hamming')
Hamming: Hamming window
else: square window
Note:
-----
input signal (batch, length, 1)
output signal (batch, frame_num, frame_length)
    where frame_num = floor(length / h_len)
    Compatibility with Librosa framing needs to be checked
"""
def __init__(self, w_len, h_len, w_type='Hamming'):
super(SignalFraming, self).__init__(1, w_len, w_len, stride=h_len,
padding = 0, dilation = 1, groups=1, bias=False)
self.m_wlen = w_len
self.m_wtype = w_type
self.m_hlen = h_len
if w_type == 'Hamming':
self.m_win = scipy_signal.windows.hamming(self.m_wlen)
else:
self.m_win = np.ones([self.m_wlen])
# for padding
if h_len > w_len:
print("Error: SignalFraming(w_len, h_len)")
print("w_len cannot be < h_len")
sys.exit(1)
self.m_mat = np.diag(self.m_win)
self.m_pad_len_l = (w_len - h_len)//2
self.m_pad_len_r = (w_len - h_len) - self.m_pad_len_l
# filter [output_dim = frame_len, 1, input_dim=frame_len]
# No need to flip the filter coefficients
with torch.no_grad():
tmp_coef = torch.zeros([w_len, 1, w_len])
tmp_coef[:, 0, :] = torch.tensor(self.m_mat)
self.weight = torch.nn.Parameter(tmp_coef, requires_grad = False)
return
def forward(self, signal):
"""
signal: (batchsize, length1, 1)
output: (batchsize, num_frame, frame_length)
Note:
"""
if signal.shape[-1] > 1:
print("Error: SignalFraming expects shape:")
print("signal (batchsize, length, 1)")
sys.exit(1)
# 1. switch dimension from (batch, length, dim) to (batch, dim, length)
# 2. pad signal on the left to (batch, dim, length + pad_length)
signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1),\
(self.m_pad_len_l, self.m_pad_len_r))
# switch dimension from (batch, dim, length) to (batch, length, dim)
return super(SignalFraming, self).forward(signal_pad).permute(0, 2, 1)
class Conv1dStride(torch_nn.Conv1d):
""" Wrapper for normal 1D convolution with stride (optionally)
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length2, dim_out)
However, we wish that length2 = floor(length / stride)
Therefore,
padding_total_length - dilation_s * (kernel_s - 1) -1 + stride = 0
or,
padding_total_length = dilation_s * (kernel_s - 1) + 1 - stride
    Conv1dStride(input_dim, output_dim, kernel_s, dilation_s = 1,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant')
input_dim: int, input dimension (input channel)
output_dim: int, output dimension (output channel)
kernel_s: int, kernel size of filter
dilation_s: int, dilation for convolution
causal: bool, whether causal convolution, default False
stride: int, stride size, default 1
groups: int, group for conv1d, default 1
bias: bool, whether add bias, default True
tanh: bool, whether use tanh activation, default True
pad_mode: str, padding method, default "constant"
"""
def __init__(self, input_dim, output_dim, kernel_s, dilation_s=1,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dStride, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# padding size
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1) + 1 - stride
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = (dilation_s*(kernel_s-1)+1-stride) // 2
self.pad_ri = (dilation_s*(kernel_s-1)+1-stride) - self.pad_le
# activation functions
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# https://github.com/pytorch/pytorch/issues/1333
# permute to (batchsize=1, dim, length)
# add one dimension as (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length+pad_length)
x = torch_nn_func.pad(
data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri,0,0), \
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dStride, self).forward(x))
return output.permute(0, 2, 1)
class MaxPool1dStride(torch_nn.MaxPool1d):
""" Wrapper for maxpooling
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length2, dim_in)
However, we wish that length2 = floor(length / stride)
Therefore,
padding_total_length - dilation_s * (kernel_s - 1) -1 + stride = 0
or,
padding_total_length = dilation_s * (kernel_s - 1) + 1 - stride
MaxPool1dStride(kernel_s, stride, dilation_s=1)
"""
def __init__(self, kernel_s, stride, dilation_s=1):
super(MaxPool1dStride, self).__init__(
kernel_s, stride, 0, dilation_s)
        # pad on both sides
self.pad_le = (dilation_s*(kernel_s-1)+1-stride) // 2
self.pad_ri = (dilation_s*(kernel_s-1)+1-stride) - self.pad_le
def forward(self, data):
# https://github.com/pytorch/pytorch/issues/1333
# permute to (batchsize=1, dim, length)
# add one dimension as (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length+pad_length)
x = torch_nn_func.pad(
data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri,0,0)).squeeze(2).contiguous()
        # max pooling
        # permute back to (batchsize=1, length, dim)
output = super(MaxPool1dStride, self).forward(x)
return output.permute(0, 2, 1)
class AvePool1dStride(torch_nn.AvgPool1d):
""" Wrapper for average pooling
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length2, dim_in)
However, we wish that length2 = floor(length / stride)
Therefore,
padding_total_length - dilation_s * (kernel_s - 1) -1 + stride = 0
or,
padding_total_length = dilation_s * (kernel_s - 1) + 1 - stride
    AvePool1dStride(kernel_s, stride)
"""
def __init__(self, kernel_s, stride):
super(AvePool1dStride, self).__init__(
kernel_s, stride, 0)
        # pad on both sides
self.pad_le = ((kernel_s-1)+1-stride) // 2
self.pad_ri = ((kernel_s-1)+1-stride) - self.pad_le
def forward(self, data):
# https://github.com/pytorch/pytorch/issues/1333
# permute to (batchsize=1, dim, length)
# add one dimension as (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length+pad_length)
x = torch_nn_func.pad(
data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri,0,0)).squeeze(2).contiguous()
        # average pooling
        # permute back to (batchsize=1, length, dim)
output = super(AvePool1dStride, self).forward(x)
return output.permute(0, 2, 1)
class Maxout1D(torch_nn.Module):
""" Maxout activation (along 1D)
Maxout(d_in, d_out, pool_size)
From https://github.com/pytorch/pytorch/issues/805
Arguments
---------
d_in: feature input dimension
d_out: feature output dimension
pool_size: window size of max-pooling
Usage
-----
l_maxout1d = Maxout1D(d_in, d_out, pool_size)
data_in = torch.rand([1, T, d_in])
data_out = l_maxout1d(data_in)
"""
def __init__(self, d_in, d_out, pool_size):
super().__init__()
self.d_in, self.d_out, self.pool_size = d_in, d_out, pool_size
self.lin = torch_nn.Linear(d_in, d_out * pool_size)
def forward(self, inputs):
# suppose inputs (batchsize, length, dim)
# shape (batchsize, length, out-dim, pool_size)
shape = list(inputs.size())
shape[-1] = self.d_out
shape.append(self.pool_size)
max_dim = len(shape) - 1
# shape (batchsize, length, out-dim * pool_size)
out = self.lin(inputs)
# view to (batchsize, length, out-dim, pool_size)
# maximize on the last dimension
m, i = out.view(*shape).max(max_dim)
return m
class MaxFeatureMap2D(torch_nn.Module):
""" Max feature map (along 2D)
MaxFeatureMap2D(max_dim=1)
l_conv2d = MaxFeatureMap2D(1)
data_in = torch.rand([1, 4, 5, 5])
data_out = l_conv2d(data_in)
Input:
------
data_in: tensor of shape (batch, channel, ...)
Output:
-------
data_out: tensor of shape (batch, channel//2, ...)
Note
----
By default, Max-feature-map is on channel dimension,
and maxout is used on (channel ...)
"""
def __init__(self, max_dim = 1):
super().__init__()
self.max_dim = max_dim
def forward(self, inputs):
# suppose inputs (batchsize, channel, length, dim)
shape = list(inputs.size())
if self.max_dim >= len(shape):
print("MaxFeatureMap: maximize on %d dim" % (self.max_dim))
print("But input has %d dimensions" % (len(shape)))
sys.exit(1)
if shape[self.max_dim] // 2 * 2 != shape[self.max_dim]:
print("MaxFeatureMap: maximize on %d dim" % (self.max_dim))
print("But this dimension has an odd number of data")
sys.exit(1)
shape[self.max_dim] = shape[self.max_dim]//2
shape.insert(self.max_dim, 2)
# view to (batchsize, 2, channel//2, ...)
# maximize on the 2nd dim
m, i = inputs.view(*shape).max(self.max_dim)
return m
class SelfWeightedPooling(torch_nn.Module):
""" SelfWeightedPooling module
Inspired by
https://github.com/joaomonteirof/e2e_antispoofing/blob/master/model.py
To avoid confusion, I will call it self weighted pooling
    Using the self-attention format, this is similar to softmax(Query, Key)Value,
    where Query is a shared learnable mm_weight, and Key and Value are the
    input sequence.
l_selfpool = SelfWeightedPooling(5, 1, False)
with torch.no_grad():
input_data = torch.rand([3, 10, 5])
output_data = l_selfpool(input_data)
"""
def __init__(self, feature_dim, num_head=1, mean_only=False):
""" SelfWeightedPooling(feature_dim, num_head=1, mean_only=False)
Attention-based pooling
input (batchsize, length, feature_dim) ->
output
(batchsize, feature_dim * num_head), when mean_only=True
(batchsize, feature_dim * num_head * 2), when mean_only=False
args
----
feature_dim: dimension of input tensor
num_head: number of heads of attention
mean_only: whether compute mean or mean with std
False: output will be (batchsize, feature_dim*2)
True: output will be (batchsize, feature_dim)
"""
super(SelfWeightedPooling, self).__init__()
self.feature_dim = feature_dim
self.mean_only = mean_only
self.noise_std = 1e-5
self.num_head = num_head
# transformation matrix (num_head, feature_dim)
self.mm_weights = torch_nn.Parameter(
torch.Tensor(num_head, feature_dim), requires_grad=True)
torch_init.kaiming_uniform_(self.mm_weights)
return
def _forward(self, inputs):
""" output, attention = forward(inputs)
inputs
------
inputs: tensor, shape (batchsize, length, feature_dim)
output
------
output: tensor
(batchsize, feature_dim * num_head), when mean_only=True
(batchsize, feature_dim * num_head * 2), when mean_only=False
attention: tensor, shape (batchsize, length, num_head)
"""
# batch size
batch_size = inputs.size(0)
# feature dimension
feat_dim = inputs.size(2)
        # input is (batch, length, feature_dim)
# change mm_weights to (batchsize, feature_dim, num_head)
# weights will be in shape (batchsize, length, num_head)
weights = torch.bmm(inputs,
self.mm_weights.permute(1, 0).contiguous()\
.unsqueeze(0).repeat(batch_size, 1, 1))
# attention (batchsize, length, num_head)
attentions = torch_nn_func.softmax(torch.tanh(weights),dim=1)
# apply attention weight to input vectors
if self.num_head == 1:
            # We could also use the code in the else branch below when
            # num_head == 1, but there is a small numerical difference.
            # original implementation in github
            # elementwise multiplication
# weighted input vector: (batchsize, length, feature_dim)
weighted = torch.mul(inputs, attentions.expand_as(inputs))
else:
            # weighted: (batch * length, feat_dim, num_head)
weighted = torch.bmm(
inputs.view(-1, feat_dim, 1),
attentions.view(-1, 1, self.num_head))
            # weighted: (batch, length, feat_dim * num_head)
weighted = weighted.view(batch_size, -1, feat_dim * self.num_head)
# pooling
if self.mean_only:
# only output the mean vector
representations = weighted.sum(1)
else:
# output the mean and std vector
noise = self.noise_std * torch.randn(
weighted.size(), dtype=weighted.dtype, device=weighted.device)
avg_repr, std_repr = weighted.sum(1), (weighted+noise).std(1)
# concatenate mean and std
representations = torch.cat((avg_repr,std_repr),1)
# done
return representations, attentions
def forward(self, inputs):
""" output = forward(inputs)
inputs
------
inputs: tensor, shape (batchsize, length, feature_dim)
output
------
output: tensor
(batchsize, feature_dim * num_head), when mean_only=True
(batchsize, feature_dim * num_head * 2), when mean_only=False
"""
output, _ = self._forward(inputs)
return output
def debug(self, inputs):
return self._forward(inputs)
class Conv1dForARModel(Conv1dKeepLength):
""" Definition of dilated Convolution for autoregressive model
This module is based on block_nn.py/Conv1DKeepLength.
However, Conv1DKeepLength doesn't assume step-by-step generation
    for autoregressive models.
This Module further adds the method to generate output in AR model
Example:
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import tutorials.plot_lib as nii_plot
# Compare the results of two layers
batchsize = 1
input_dim = 1
output_dim = 1
length = 5
dilation = 2
kernel_s = 3
# Layers
conv1 = nii_nn.Conv1dKeepLength(
input_dim, output_dim, dilation, kernel_s,
causal=True, tanh=False, bias=True)
conv2 = Conv1dForARModel(input_dim, output_dim, dilation, kernel_s,
tanh=False, bias=True)
conv2.weight = conv1.weight
conv2.bias = conv1.bias
# Test
input = torch.rand([batchsize, length, input_dim])
with torch.no_grad():
output = conv1(input)
output2 = conv2(input)
out = torch.zeros([batchsize, length, output_dim])
for step in range(length):
out[:, step:step+1, :] = conv2(input[:, step:step+1, :], step)
print(output - output2)
print(output - out)
#nii_plot.plot_tensor(input, deci_width=2)
#nii_plot.plot_tensor(output, deci_width=2)
#nii_plot.plot_tensor(output2, deci_width=2)
#nii_plot.plot_tensor(out, deci_width=2)
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
bias=True, tanh = True, causal=True):
""" Conv1dForARModel(input_dim, output_dim, dilation_s, kernel_s,
bias=True, tanh=True)
args
----
input_dim: int, input tensor should be (batchsize, length, input_dim)
output_dim: int, output tensor will be (batchsize, length, output_dim)
dilation_s: int, dilation size
kernel_s: int, kernel size
bias: bool, whether use bias term, default True
tanh: bool, whether apply tanh on the output, default True
causal: bool, whether the convoltuion is causal, default True
        Note that if causal==False, step-by-step AR generation will raise an Error
"""
super(Conv1dForARModel, self).__init__(
input_dim, output_dim, dilation_s, kernel_s, \
causal = causal, stride = 1, groups=1, bias=bias, tanh = tanh)
# configuration options
self.use_bias = bias
self.use_tanh = tanh
self.kernel_s = kernel_s
self.dilation_s = dilation_s
self.out_dim = output_dim
self.causal = causal
# See slide http://tonywangx.github.io/slide.html#misc CURRENNT WaveNet,
# page 50-56 for example on kernel_s = 2
#
# buffer length, depends on kernel size and dilation size
# kernel_size = 3, dilation_size = 1 -> * * * -> buffer_len = 3
# kernel_size = 3, dilation_size = 2 -> * . * . * -> buffer_len = 5
self.buffer_len = (kernel_s - 1) * dilation_s + 1
self.buffer_data = None
# self.buffer_conv1d = None
return
def forward(self, x, step_idx = None):
""" output = forward(x, step_idx)
input
-----
x: tensor, in shape (batchsize, length, input_dim)
step_idx: int, the index of the current time step
or None
output
------
output: tensor, in shape (batchsize, length, output_dim)
        If step_idx is None
        ------------------------
        this is the same as the common conv1d forward method
        If step_idx is not None
------------------------
This method assumes input and output tensors
are for one time step, i.e., length = 1 for both x and output.
This method should be used in a loop, for example:
model.eval()
for idx in range(total_time_steps):
...
output[:, idx:idx+1, :] = forward(x[:, idx:idx+1, :])
...
This Module will use a buffer to store the intermediate results.
See slide http://tonywangx.github.io/slide.html#misc CURRENNT WaveNet,
page 50-56 for example on kernel_s = 2
"""
if step_idx is None:
# normal training mode, use the common conv forward method
return super(Conv1dForARModel, self).forward(x)
else:
if self.causal is False:
print("Step-by-step generation cannot work on non-causal conv")
print("Please use causal=True for Conv1dForARModel")
sys.exit(1)
# step-by-step for generation in AR model
# initialize buffer if necessary
if step_idx == 0:
self.buffer_data = torch.zeros(
[x.shape[0], self.buffer_len, x.shape[-1]],
dtype=x.dtype, device=x.device)
#self.buffer_conv1d = torch.zeros(
# [x.shape[0], self.kernel_s, x.shape[-1]],
# dtype=x.dtype, device=x.device)
# Put new input data into buffer
# the position index to put the input data
tmp_ptr_save = step_idx % self.buffer_len
# assume x is (batchsize, length=1, input_dim), thus
# only take x[:, 0, :]
self.buffer_data[:, tmp_ptr_save, :] = x[:, 0, :]
## Method 1: do multiplication and summing
##
## initialize
#output_tensor = torch.zeros(
# [x.shape[0], self.out_dim], dtype=x.dtype, device=x.device)
## loop over the kernel
#for ker_idx in range(self.kernel_s):
# # which buffer should be retrieved for this kernel idx
# tmp_data_idx = (step_idx - ker_idx * self.dilation_s) \
# % self.buffer_len
# # apply the kernel and sum the product
# # note that self.weight[:, :, -1] is the 1st kernel
# output_tensor += torch.matmul(
# self.buffer_data[:, tmp_data_idx, :],
# self.weight[:, :, self.kernel_s - ker_idx - 1].T)
## Method 2: take advantage of conv1d API
# Method 2 is slower than Method1 when kernel size is small
## create a input buffer to conv1d
#idxs = [(step_idx - x * self.dilation_s) % self.buffer_len \
# for x in range(self.kernel_s)][::-1]
#self.buffer_conv1d = self.buffer_data[:, idxs, :].permute(0, 2, 1)
#output_tensor = torch_nn_func.conv1d(self.buffer_conv1d,
# self.weight).permute(0, 2, 1)
# Method 3:
batchsize = x.shape[0]
# which data buffer should be retrieved for each kernel
# [::-1] is necessary because self.weight[:, :, -1] corresponds to
# the first buffer, [:, :, -2] to the second ...
index_buf = [(step_idx - y * self.dilation_s) % self.buffer_len \
for y in range(self.kernel_s)][::-1]
            # concatenate buffers as a tensor [batchsize, input_dim * kernel_s]
            # concatenate weights as a tensor [input_dim * kernel_s, output_dim]
# (out_dim, in_dim, kernel_s)-permute->(out_dim, kernel_s, in_dim)
# (out_dim, kernel_s, in_dim)-reshape->(out_dim, in_dim * kernel_s)
output_tensor = torch.mm(
self.buffer_data[:, index_buf, :].view(batchsize, -1),
self.weight.permute(0, 2, 1).reshape(self.out_dim, -1).T)
# apply bias and tanh if necessary
if self.use_bias:
output_tensor += self.bias
if self.use_tanh:
output_tensor = torch.tanh(output_tensor)
# make it to (batch, length=1, output_dim)
return output_tensor.unsqueeze(1)
class AdjustTemporalResoIO(torch_nn.Module):
def __init__(self, list_reso, target_reso, list_dims):
"""AdjustTemporalResoIO(list_reso, target_reso, list_dims)
Module to change temporal resolution of input tensors.
Args
----
list_reso: list, list of temporal resolutions.
list_reso[i] should be the temporal resolution of the
(i+1)-th tensor
        target_reso: int, the target temporal resolution that the tensors will be changed to
list_dims: list, list of feat_dim for tensors
assume tensor to have shape (batchsize, time length, feat_dim)
Note
----
target_reso must be <= max(list_reso)
all([target_reso % x == 0 for x in list_reso if x < target_reso])
        all([x % target_reso == 0 for x in list_reso if x > target_reso])
Suppose a tensor A (batchsize, time_length1, feat_dim_1) has
temporal resolution of 1. Tensor B has temporal resolution
k and is aligned with A. Then B[:, n, :] corresponds to
A[:, k*n:k*(n+1), :].
For example:
let k = 3, batchsize = 1, feat_dim = 1
---------------> time axis
0 1 2 3 4 5 6 7 8
A[0, 0:9, 0] = [ a b c d e f g h i ]
B[0, 0:3, 0] = [ * & ^ ]
[*] is aligned with [a b c]
[&] is aligned with [d e f]
[^] is aligned with [g h i]
Assume the input tensor list is [A, B]:
list_reso = [1, 3]
list_dims = [A.shape[-1], B.shape[-1]]
If target_reso = 3, then
B will not be changed
A (batchsize=1, time_length1=9, feat_dim=1) will be A_new (1, 3, 3)
B [0, 0:3, 0] = [ * & ^ ]
A_new[0, 0:3, :] = [ [a, [d, [g, ]
b, e, h,
c] f] i]
More concrete examples:
input_dims = [5, 3]
rates = [1, 6]
target_rate = 2
        l_adjust = AdjustTemporalResoIO(rates, target_rate, input_dims)
data1 = torch.rand([2, 2*6, 5])
data2 = torch.rand([2, 2, 3])
data1_new, data2_new = l_adjust([data1, data2])
# Visualization requires matplotlib and tutorial.plot_lib as nii_plot
nii_plot.plot_tensor(data1)
nii_plot.plot_tensor(data1_new)
nii_plot.plot_tensor(data2)
nii_plot.plot_tensor(data2_new)
"""
super(AdjustTemporalResoIO, self).__init__()
list_reso = np.array(list_reso)
list_dims = np.array(list_dims)
# save
self.list_reso = list_reso
self.fatest_reso = min(list_reso)
self.slowest_reso = max(list_reso)
self.target_reso = target_reso
# check
if any(list_reso < 0):
print("Expects positive resolution in AdjustTemporalResoIO")
sys.exit(1)
if self.target_reso < 0:
print("Expects positive target_reso in AdjustTemporalResoIO")
sys.exit(1)
if any([x % self.target_reso != 0 for x in self.list_reso \
if x > self.target_reso]):
print("Resolution " + str(list_reso) + " incompatible")
print(" with target resolution {:d}".format(self.target_reso))
sys.exit(1)
if any([self.target_reso % x != 0 for x in self.list_reso \
if x < self.target_reso]):
print("Resolution " + str(list_reso) + " incompatible")
print(" with target resolution {:d}".format(self.target_reso))
sys.exit(1)
self.dim_change = []
self.reso_change = []
self.l_upsampler = []
for x, dim in zip(self.list_reso, list_dims):
if x > self.target_reso:
# up sample
                # up-sampling doesn't change the feat dim, it just duplicates frames
self.dim_change.append(1)
self.reso_change.append(x // self.target_reso)
self.l_upsampler.append(
nii_nn.UpSampleLayer(dim, x // self.target_reso))
elif x < self.target_reso:
# down sample
# for down-sample, we fold the multiple feature frames into one
self.dim_change.append(self.target_reso // x)
# use a negative number to indicate down-sample
self.reso_change.append(-self.target_reso // x)
self.l_upsampler.append(None)
else:
self.dim_change.append(1)
self.reso_change.append(1)
self.l_upsampler.append(None)
self.l_upsampler = torch_nn.ModuleList(self.l_upsampler)
# log down the dimensions after resolution change
self.dim = []
if list_dims is not None and len(list_dims) == len(self.dim_change):
self.dim = [x * y for x, y in zip(self.dim_change, list_dims)]
return
def get_dims(self):
return self.dim
def forward(self, tensor_list):
""" tensor_list = AdjustTemporalResoIO(tensor_list):
Adjust the temporal resolution of the input tensors.
For up-sampling, the tensor is duplicated
        For down-sampling, multiple time steps are concatenated into a single vector
input
-----
tensor_list: list, list of tensors,
(batchsize, time steps, feat dim)
output
------
tensor_list: list, list of tensors,
(batchsize, time_steps * N, feat_dim * Y)
where N is the resolution change option in self.reso_change,
Y is the factor to change dimension in self.dim_change
"""
output_tensor_list = []
for in_tensor, dim_fac, reso_fac, l_up in \
zip(tensor_list, self.dim_change, self.reso_change,
self.l_upsampler):
batchsize = in_tensor.shape[0]
timelength = in_tensor.shape[1]
if reso_fac == 1:
# no change
output_tensor_list.append(in_tensor)
elif reso_fac < 0:
# down sample by concatenating
reso_fac *= -1
expected_len = timelength // reso_fac
trim_length = expected_len * reso_fac
if expected_len == 0:
# if input tensor length < down_sample factor
output_tensor_list.append(
torch.reshape(in_tensor[:, 0:1, :],
(batchsize, 1, -1)))
else:
                    # trim the length to a multiple of reso_fac, then
                    # fold the time steps into the feature dimension
output_tensor_list.append(
torch.reshape(in_tensor[:, 0:trim_length, :],
(batchsize, expected_len, -1)))
else:
# up-sampling by duplicating
output_tensor_list.append(l_up(in_tensor))
return output_tensor_list
class LSTMZoneOut(torch_nn.Module):
"""LSTM layer with zoneout
    This module relies on torch.nn.LSTMCell
"""
def __init__(self, in_feat_dim, out_feat_dim,
bidirectional=False, residual_link=False, bias=True):
"""LSTMZoneOut(in_feat_dim, out_feat_dim,
bidirectional=False, residual_link=False, bias=True)
Args
----
in_feat_dim: int, input tensor should be (batch, length, in_feat_dim)
out_feat_dim: int, output tensor will be (batch, length, out_feat_dim)
bidirectional: bool, whether bidirectional, default False
residual_link: bool, whether residual link over LSTM, default False
bias: bool, bias option in torch.nn.LSTMCell, default True
When bidirectional is True, out_feat_dim must be an even number
When residual_link is True, out_feat_dim must be equal to in_feat_dim
"""
super(LSTMZoneOut, self).__init__()
# config parameters
self.in_dim = in_feat_dim
self.out_dim = out_feat_dim
self.flag_bi = bidirectional
self.flag_res = residual_link
self.bias = bias
# check
if self.flag_res and self.out_dim != self.in_dim:
print("Error in LSTMZoneOut w/ residual: in_feat_dim!=out_feat_dim")
sys.exit(1)
if self.flag_bi and self.out_dim % 2 > 0:
print("Error in Bidirecional LSTMZoneOut: out_feat_dim is not even")
sys.exit(1)
# layer
if self.flag_bi:
self.l_lstm1 = torch_nn.LSTMCell(
self.in_dim, self.out_dim//2, self.bias)
self.l_lstm2 = torch_nn.LSTMCell(
self.in_dim, self.out_dim//2, self.bias)
else:
self.l_lstm1 = torch_nn.LSTMCell(
self.in_dim, self.out_dim, self.bias)
self.l_lstm2 = None
return
def _zoneout(self, pre, cur, p=0.1):
"""zoneout wrapper
"""
if self.training:
with torch.no_grad():
mask = torch.zeros_like(pre).bernoulli_(p)
return pre * mask + cur * (1-mask)
else:
return cur
def forward(self, x):
"""y = LSTMZoneOut(x)
input
-----
x: tensor, (batchsize, length, in_feat_dim)
output
------
y: tensor, (batchsize, length, out_feat_dim)
"""
batchsize = x.shape[0]
length = x.shape[1]
# output tensor
y = torch.zeros([batchsize, length, self.out_dim],
device=x.device, dtype=x.dtype)
# recurrent
if self.flag_bi:
# for bi-directional
hid1 = torch.zeros([batchsize, self.out_dim//2],
device=x.device, dtype=x.dtype)
hid2 = torch.zeros_like(hid1)
cell1 = torch.zeros_like(hid1)
cell2 = torch.zeros_like(hid1)
for time in range(length):
# reverse time idx
rtime = length-time-1
# compute in both forward and reverse directions
hid1_new, cell1_new = self.l_lstm1(x[:,time, :], (hid1, cell1))
hid2_new, cell2_new = self.l_lstm2(x[:,rtime, :], (hid2, cell2))
hid1 = self._zoneout(hid1, hid1_new)
hid2 = self._zoneout(hid2, hid2_new)
y[:, time, 0:self.out_dim//2] = hid1
y[:, length-time-1, self.out_dim//2:] = hid2
else:
# for uni-directional
hid1 = torch.zeros([batchsize, self.out_dim],
device=x.device, dtype=x.dtype)
cell1 = torch.zeros_like(hid1)
for time in range(length):
hid1_new, cell1_new = self.l_lstm1(x[:, time, :], (hid1, cell1))
hid1 = self._zoneout(hid1, hid1_new)
y[:, time, :] = hid1
# residual part
if self.flag_res:
y = y+x
return y
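# Usage sketch for LSTMZoneOut (illustrative only; the feature dimensions and
# sequence length below are arbitrary). Zoneout is active only in training
# mode; in eval mode the layer behaves like a plain (B)LSTM:
#
# m_lstm = LSTMZoneOut(in_feat_dim=16, out_feat_dim=16,
#                      bidirectional=False, residual_link=True)
# x = torch.rand([2, 50, 16])
# y = m_lstm(x)  # -> (2, 50, 16)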
class LinearInitialized(torch_nn.Module):
"""Linear layer with specific initialization
"""
def __init__(self, weight_mat, flag_train=True):
"""LinearInitialized(weight_mat, flag_trainable=True)
Args
----
weight_mat: tensor, (input_dim, output_dim),
the weight matrix for initializing the layer
        flag_train: bool, whether the weight is trainable or fixed, default True
This can be used for trainable filter bank. For example:
import sandbox.util_frontend as nii_front_end
l_fb = LinearInitialized(nii_front_end.linear_fb(fn, sr, filter_num))
y = l_fb(x)
"""
super(LinearInitialized, self).__init__()
self.weight = torch_nn.Parameter(weight_mat, requires_grad=flag_train)
return
def forward(self, x):
"""y = LinearInitialized(x)
input
-----
x: tensor, (batchsize, ..., input_feat_dim)
output
------
y: tensor, (batchsize, ..., output_feat_dim)
Note that weight is in shape (input_feat_dim, output_feat_dim)
"""
return torch.matmul(x, self.weight)
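# Usage sketch for LinearInitialized (illustrative only; the weight matrix
# below is random, whereas in practice it would be a pre-computed projection
# such as a filter-bank matrix):
#
# w = torch.randn([80, 40])
# l_proj = LinearInitialized(w, flag_train=False)
# y = l_proj(torch.rand([2, 100, 80]))  # -> (2, 100, 40)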
class GRULayer(torch_nn.Module):
"""GRULayer
There are two modes for forward
1. forward(x) -> process sequence x
2. forward(x[n], n) -> process n-th step of x
Example:
data = torch.randn([2, 10, 3])
m_layer = GRULayer(3, 3)
out = m_layer(data)
out_2 = torch.zeros_like(out)
for idx in range(data.shape[1]):
out_2[:, idx:idx+1, :] = m_layer._forwardstep(
data[:, idx:idx+1, :], idx)
"""
def __init__(self, in_size, out_size, flag_bidirec=False):
"""GRULayer(in_size, out_size, flag_bidirec=False)
Args
----
in_size: int, dimension of input feature per step
out_size: int, dimension of output feature per step
flag_bidirec: bool, whether this is bi-directional GRU
"""
super(GRULayer, self).__init__()
self.m_in_size = in_size
self.m_out_size = out_size
self.m_flag_bidirec = flag_bidirec
self.m_gru = torch_nn.GRU(
in_size, out_size, batch_first=True,
bidirectional=flag_bidirec)
# for step-by-step generation
self.m_grucell = None
self.m_buffer = None
return
def _get_gru_cell(self):
# dump GRU layer to GRU cell for step-by-step generation
self.m_grucell = torch_nn.GRUCell(self.m_in_size, self.m_out_size)
self.m_grucell.weight_hh.data = self.m_gru.weight_hh_l0.data
self.m_grucell.weight_ih.data = self.m_gru.weight_ih_l0.data
self.m_grucell.bias_hh.data = self.m_gru.bias_hh_l0.data
self.m_grucell.bias_ih.data = self.m_gru.bias_ih_l0.data
return
def _forward(self, x):
"""y = _forward(x)
input
-----
x: tensor, (batch, length, inputdim)
output
------
y: tensor, (batch, length, out-dim)
"""
out, hn = self.m_gru(x)
return out
def _forwardstep(self, x, step_idx):
"""y = _forwardstep(x)
input
-----
x: tensor, (batch, 1, inputdim)
output
------
y: tensor, (batch, 1, out-dim)
"""
if self.m_flag_bidirec:
print("Bi-directional GRU not supported for step-by-step mode")
sys.exit(1)
else:
if step_idx == 0:
# load weight as grucell
if self.m_grucell is None:
self._get_gru_cell()
# buffer
self.m_buffer = torch.zeros(
[x.shape[0], self.m_out_size],
device=x.device, dtype=x.dtype)
self.m_buffer = self.m_grucell(x[:, 0, :], self.m_buffer)
# (batch, dim) -> (batch, 1, dim)
return self.m_buffer.unsqueeze(1)
def forward(self, x, step_idx=None):
"""y = forward(x, step_idx=None)
input
-----
x: tensor, (batch, length, inputdim)
output
------
y: tensor, (batch, length, out-dim)
When step_idx >= 0, length must be 1, forward(x[:, n:n+1, :], n)
will process the x at the n-th step. The hidden state will be saved
in the buffer and used for n+1 step
"""
if step_idx is None:
# given full context
return self._forward(x)
else:
# step-by-step processing
return self._forwardstep(x, step_idx)
class LSTMLayer(torch_nn.Module):
"""LSTMLayer
There are two modes for forward
1. forward(x) -> process sequence x
2. forward(x[n], n) -> process n-th step of x
Example:
data = torch.randn([2, 10, 3])
m_layer = LSTMLayer(3, 3)
out = m_layer(data)
out_2 = torch.zeros_like(out)
for idx in range(data.shape[1]):
out_2[:, idx:idx+1, :] = m_layer._forwardstep(
data[:, idx:idx+1, :], idx)
"""
def __init__(self, in_size, out_size, flag_bidirec=False):
"""LSTMLayer(in_size, out_size, flag_bidirec=False)
Args
----
in_size: int, dimension of input feature per step
out_size: int, dimension of output feature per step
        flag_bidirec: bool, whether this is a bi-directional LSTM
"""
super(LSTMLayer, self).__init__()
self.m_in_size = in_size
self.m_out_size = out_size
self.m_flag_bidirec = flag_bidirec
self.m_lstm = torch_nn.LSTM(
input_size=in_size, hidden_size=out_size,
batch_first=True,
bidirectional=flag_bidirec)
# for step-by-step generation
self.m_lstmcell = None
self.m_c_buf = None
self.m_h_buf = None
return
def _get_lstm_cell(self):
# dump LSTM layer to LSTM cell for step-by-step generation
self.m_lstmcell = torch_nn.LSTMCell(self.m_in_size, self.m_out_size)
self.m_lstmcell.weight_hh.data = self.m_lstm.weight_hh_l0.data
self.m_lstmcell.weight_ih.data = self.m_lstm.weight_ih_l0.data
self.m_lstmcell.bias_hh.data = self.m_lstm.bias_hh_l0.data
self.m_lstmcell.bias_ih.data = self.m_lstm.bias_ih_l0.data
return
def _forward(self, x):
"""y = _forward(x)
input
-----
x: tensor, (batch, length, inputdim)
output
------
y: tensor, (batch, length, out-dim)
"""
out, hn = self.m_lstm(x)
return out
def _forwardstep(self, x, step_idx):
"""y = _forwardstep(x)
input
-----
x: tensor, (batch, 1, inputdim)
output
------
y: tensor, (batch, 1, out-dim)
"""
if self.m_flag_bidirec:
print("Bi-directional GRU not supported for step-by-step mode")
sys.exit(1)
else:
if step_idx == 0:
# For the 1st time step, prepare the LSTM Cell and buffer
# load weight as LSTMCell
if self.m_lstmcell is None:
self._get_lstm_cell()
# buffer
self.m_c_buf = torch.zeros([x.shape[0], self.m_out_size],
device=x.device, dtype=x.dtype)
self.m_h_buf = torch.zeros_like(self.m_c_buf)
# do generation
self.m_h_buf, self.m_c_buf = self.m_lstmcell(
x[:, 0, :], (self.m_h_buf, self.m_c_buf))
# (batch, dim) -> (batch, 1, dim)
return self.m_h_buf.unsqueeze(1)
def forward(self, x, step_idx=None):
"""y = forward(x, step_idx=None)
input
-----
x: tensor, (batch, length, inputdim)
output
------
y: tensor, (batch, length, out-dim)
When step_idx >= 0, length must be 1, forward(x[:, n:n+1, :], n)
will process the x at the n-th step. The hidden state will be saved
in the buffer and used for n+1 step
"""
if step_idx is None:
# given full context
return self._forward(x)
else:
# step-by-step processing
return self._forwardstep(x, step_idx)
class DropoutForMC(torch_nn.Module):
"""Dropout layer for Bayesian model
    The difference is that we do dropout even in the eval stage
"""
def __init__(self, p, dropout_flag=True):
super(DropoutForMC, self).__init__()
self.p = p
self.flag = dropout_flag
return
def forward(self, x):
return torch_nn_func.dropout(x, self.p, training=self.flag)
if __name__ == "__main__":
print("Definition of block NN")
| 61,631 | 36.172497 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_resnet_new.py | ##!/usr/bin/env python
"""
ResNet model
Modified based on https://github.com/joaomonteirof/e2e_antispoofing
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
class ResNetBlock1D(torch_nn.Module):
""" ResNetBlock1D(inplane, outplane, dilation=1, stride=1,
kernel=[1, 3, 1], expansion = 4)
Args
----
inplane: int, input feature dimension.
outplane: int, output feature dimension.
dilation: int, convolution dilation
stride: int, stride size
kernel: [int, int, int], the kernel size of the 3 conv layers
expansion: int, ratio for the bottleneck
"""
def __init__(self, inplane, outplane, dilation=1, stride=1,
kernel=[1, 3, 1], expansion = 4, act_type='ReLU'):
super(ResNetBlock1D, self).__init__()
#
self.ins = inplane
self.outs = outplane
self.expansion = expansion
self.hid = self.ins // expansion
dl = dilation
#
# block1 (batch, input_dim, length) -> (batch, hid_dim, length)
pad = self._get_pad(1, dilation, kernel[0])
self.conv1 = torch_nn.Sequential(
torch_nn.Conv1d(self.ins, self.hid, kernel[0], 1, pad, dilation),
torch_nn.BatchNorm1d(self.hid),
self._get_act(act_type))
# block2 (batch, hid_dim, length) -> (batch, hid_dim, length // stride)
pad = self._get_pad(stride, dilation, kernel[1])
self.conv2 = torch_nn.Sequential(
torch_nn.Conv1d(self.hid, self.hid, kernel[1], stride, pad, dl),
torch_nn.BatchNorm1d(self.hid),
self._get_act(act_type))
# block3
pad = self._get_pad(1, dilation, kernel[2])
self.conv3 = torch_nn.Sequential(
torch_nn.Conv1d(self.hid, self.outs, kernel[2], 1, pad, dl),
torch_nn.BatchNorm1d(self.outs))
self.output_act = self._get_act(act_type)
# change input dimension if necessary
if self.ins != self.outs or stride != 1:
pad = self._get_pad(stride, dilation, kernel[1])
self.changeinput = torch_nn.Sequential(
torch_nn.Conv1d(self.ins,self.outs, kernel[1], stride, pad, dl),
torch_nn.BatchNorm1d(self.outs))
else:
self.changeinput = torch_nn.Identity()
return
def _get_act(self, act_type):
if act_type == 'LeakyReLU':
return torch_nn.LeakyReLU()
elif act_type == 'ELU':
return torch_nn.ELU()
elif act_type == 'GELU':
return torch_nn.GELU()
else:
return torch_nn.ReLU()
def _get_pad(self, stride, dilation, kernel):
pad = (dilation * (kernel - 1) + 1 - stride) // 2
return pad
def forward(self, input_data):
"""output = ResNetBlock(input_data)
input: tensor, (batchsize, dimension, length)
output: tensor, (batchsize, dimension, length)
"""
output = self.conv1(input_data)
output = self.conv2(output)
output = self.conv3(output)
output = output + self.changeinput(input_data)
output = self.output_act(output)
return output
class ResNet1D(torch_nn.Module):
"""
"""
def __init__(self, inplane, outplanes, kernels, dilations, strides, ratios,
block_module = ResNetBlock1D, act_type = 'ReLU'):
super(ResNet1D, self).__init__()
#
tmp_ins = [inplane] + outplanes[:-1]
tmp_outs = outplanes
layer_list = []
for indim, outdim, kernel, dilation, stride, expand in zip(
tmp_ins, tmp_outs, kernels, dilations, strides, ratios):
layer_list.append(
block_module(indim, outdim, dilation, stride, kernel,
expand, act_type))
self.m_layers = torch_nn.Sequential(*layer_list)
return
def forward(self, input_data, length_first=True):
""" output = ResNet(input_data, swap_dim=True)
input
-----
input_data: tensor, (batch, input_dim, length),
or (batch, length, input_dim)
        length_first: bool, True if input_data is in shape
                      (batch, length, input_dim); otherwise, False
output
------
output_data: tensor, (batch, length, input_dim) if length_first True
else (batch, input_dim, length)
"""
if length_first:
return self.m_layers(input_data.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
else:
return self.m_layers(input_data)
if __name__ == "__main__":
print("Implementation of ResNet for 1D signals")
| 5,137 | 32.363636 | 104 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_attention.py | ##!/usr/bin/env python
"""
Blocks for attention mechanism
Implementation is based on https://github.com/soobinseo/Transformer-TTS.git,
but the code is re-factorized:
DotScaledAttention and MultiheadAttention are separated.
The former is the core attention softmax(QK^T/sqrt(d))V,
with an optional mask to mask the dummy queries and dummy key-values
that are zero-padded due to the varied sequence lengths in a batch.
The former further includes the mask for the causal dependency
between output and input.
The latter does split-> transform -> DotScaledAtt -> concat -> transform
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2023, Xin Wang"
# ================================================
# DotScaledAttention & multi-head implementation
# ================================================
class DotScaledAttention(torch_nn.Module):
"""DotScaledAttention in Transformer
O = q_mask * softmax( (Q K^\top / \sqrt(d)) k_mask ) V, where
Q: (batch, length1, dimension)
K: (batch, length2, dimension)
V: (batch, length2, dimension2)
    k_mask: (batch, length2)
q_mask: (batch, length1)
Example:
l_m1 = DotScaledAttention()
q_data = torch.rand([5, 100, 64])
k_data2 = torch.rand([5, 40, 64])
v_data3 = torch.rand([5, 40, 32])
q_mask = torch.ones([5, 100])
q_mask[0, -4:] = 0
q_mask[1, -5:] = 0
q_mask_bin = q_mask.eq(0)
k_mask = torch.ones([5, 40])
k_mask[0, -4:] = 0
k_mask[1, -5:] = 0
k_mask_bin = k_mask.eq(0)
o1, a1 = l_m1(q_data, k_data2, v_data3, q_mask_bin, k_mask_bin)
# causal
l_m1 = DotScaledAttention(True)
data = torch.rand([5, 100, 64])
q_mask = torch.ones([5, 100])
q_mask_bin = q_mask.eq(0)
o1, a1 = l_m1(data, data, data, q_mask_bin, q_mask_bin)
o1[0, 1] - a1[0, 1, 0] * data[0, 0] - a1[0, 1, 1] * data[0, 1]
"""
def __init__(self, flag_causal=False, dropout=None):
super(DotScaledAttention, self).__init__()
self.flag_causal = flag_causal
if dropout is not None:
self.m_drop = torch_nn.Dropout(p=dropout)
else:
self.m_drop = None
return
def forward(self, Q, K, V, q_mask=None, k_mask=None):
"""O = DotScaledAttention(Q, K, V, q_mask=None, k_mask=None)
O = q_mask * softmax( (Q K^\top / \sqrt(d)) k_mask ) V
input:
------
Q: tensor, (batch, length1, dimension)
K: tensor, (batch, length2, dimension)
V: tensor, (batch, length2, dimension2)
k_mask: None or tensor, (batch, length2)
q_mask: None or tensor, (batch, length1)
output
------
O: tensor, (batch, length1, dimension2)
attn: tensor, (batch, length1, length2), attention matrix
k_mask[i] is a mask for the i-th key/value, k_mask[i, j]==True
indicates that K[i][j] and V[i][j] should be masked.
attention[i][:, j] should be zero
        q_mask[i] is a mask for the i-th query, q_mask[i, j]==True
indicates that output O[i][j] should be masked
"""
bsize = Q.shape[0]
feat_dim = Q.shape[-1]
q_len = Q.shape[1]
k_len = K.shape[1]
# Q K^\top
# attn has shape (length1, length2)
attn = torch.bmm(Q, K.transpose(1, 2)) / np.sqrt(feat_dim)
# apply k_mask to mask dummy key/value (by setting attn to 0)
if k_mask is not None:
# (batch, length2) -> (batch, length1, length2) by duplicating
mask_tmp = k_mask.unsqueeze(1).repeat(1, q_len, 1)
# if causal dependency, add diagonal mask
# mask_tmp[:, i, >i] should be True
if self.flag_causal and q_len == k_len:
# length2 must be == length1
# create upper triagle (length1, length1)
tria_tmp = torch.triu(torch.ones_like(mask_tmp[0]), diagonal=1)
# repeat to batch
tria_tmp = tria_tmp.unsqueeze(0).repeat(bsize, 1, 1).gt(0)
# overlap the upper-triangle matrix with the k_mask
mask_tmp = torch.bitwise_or(mask_tmp, tria_tmp)
elif self.flag_causal and q_len == k_len:
# even if no need to mask dummy input, it is necessary to
# mask for causal self-attention
mask_tmp = torch.triu(torch.ones([k_len, k_len]), diagonal=1)
# repeat to batch
mask_tmp = mask_tmp.unsqueeze(0).repeat(bsize, 1, 1).gt(0)
mask_tmp = mask_tmp.to(device=Q.device)
else:
            # no k_mask provided and no causal mask is needed
mask_tmp = None
# mask the attn matrix if necessary
        if mask_tmp is not None:
attn = attn.masked_fill(mask_tmp, -2 ** 32 +1)
# softmax, over length2 of the (batch, length1, length2)
attn = torch_nn_func.softmax(attn, dim=-1)
# apply q_mask
if q_mask is not None:
# (batch, length1, 1) -> (batch, length1, length2)
mask_tmp = q_mask.unsqueeze(-1).repeat(1, 1, k_len)
# mask query (row) that should be dummy
attn = attn.masked_fill(mask_tmp, 0)
        # apply dropout if necessary
if self.m_drop is not None:
attn = self.m_drop(attn)
# o = attn * V
O = torch.bmm(attn, V)
return O, attn
class MultiheadAttention(torch_nn.Module):
"""Multihead Attention in Transformer
V, K, Q -> linear -> split -> DotScaledAttention -> concate -> linear
Q: (batch, lengthQ, feat_dimK)
K: (batch, lengthK, feat_dimK)
V: (batch, lengthK, feat_dimV)
k_mask: (batch, lengthK)
q_mask: (batch, lengthQ)
Example:
q_data = torch.rand([5, 100, 64])
k_data2 = torch.rand([5, 40, 64])
v_data3 = torch.rand([5, 40, 32])
q_mask = torch.ones([5, 100])
q_mask[0, -4:] = 0
q_mask[1, -5:] = 0
q_mask_bin = q_mask.eq(0)
k_mask = torch.ones([5, 40])
k_mask[0, -4:] = 0
k_mask[1, -5:] = 0
k_mask_bin = k_mask.eq(0)
l_m = MultiheadAttention(64, 32, 4)
data_out = l_m.forward(v_data3, k_data2, q_data, k_mask_bin, q_mask_bin)
"""
def __init__(self, feat_dim_k, feat_dim_v, num_head=4,
flag_cat_q=True, flag_causal=False, dropout=None,
with_bias=False, flag_norm_before=False):
"""MultiheadAttention(num_head=4, flag_cat_q=True)
Args
----
feat_dim_k: int, feat_dimension of Query and Key
feat_dim_v: int, feat_dimension of Value
num_head: int, number of heads
        flag_cat_q: bool, if true, concatenate(query, attention's output)
flag_causal: bool, causal dependency in self-attention
        with_bias: bool, whether to use bias in the feedforward layers
                   for multi-head splitting (default False)
dropout: float or None, dropout rate on attention matrix
(default None)
        flag_norm_before: bool, whether to apply layer norm before attention
                   (default False). If true, the input q, k, and v should
                   be layer-normed before being given to forward()
When flag_causal is True, Q, K, V must have same temporal length
"""
super(MultiheadAttention, self).__init__()
# log information
self.flag_causal = flag_causal
self.num_head = num_head
# feedforward layers
if feat_dim_k % self.num_head > 0 or feat_dim_v % self.num_head > 0:
print("feat_dim_k cannot be divided by num_head")
sys.exit(1)
self.m_q_fc = torch_nn.Linear(feat_dim_k, feat_dim_k, bias=with_bias)
self.m_k_fc = torch_nn.Linear(feat_dim_k, feat_dim_k, bias=with_bias)
self.m_v_fc = torch_nn.Linear(feat_dim_v, feat_dim_v, bias=with_bias)
torch_nn.init.xavier_uniform_(
self.m_q_fc.weight, gain=torch_nn.init.calculate_gain('linear'))
torch_nn.init.xavier_uniform_(
self.m_k_fc.weight, gain=torch_nn.init.calculate_gain('linear'))
torch_nn.init.xavier_uniform_(
self.m_v_fc.weight, gain=torch_nn.init.calculate_gain('linear'))
# core attention
self.m_attn = DotScaledAttention(self.flag_causal, dropout)
# dropout
if dropout is not None:
self.m_drop = torch_nn.Dropout(p=dropout)
else:
self.m_drop = None
# output linear layer
self.flag_cat_q = flag_cat_q
if self.flag_cat_q:
self.m_output = torch_nn.Linear(feat_dim_k+feat_dim_v, feat_dim_v)
else:
self.m_output = torch_nn.Linear(feat_dim_v, feat_dim_v)
torch_nn.init.xavier_uniform_(
self.m_output.weight, gain=torch_nn.init.calculate_gain('linear'))
#
self.m_layernorm = torch_nn.LayerNorm(feat_dim_v)
self.flag_norm_before = flag_norm_before
if feat_dim_k != feat_dim_v:
print("Warning: query/key and value differ in feature dimensions.")
print("Residual connection will not be used")
return
def forward(self, value, key, query, k_mask=None, q_mask=None):
"""O, attn = MultiheadAttention(value, key, query, k_mask, q_mask)
input:
------
Q: (batch, lengthQ, feat_dimK)
K: (batch, lengthK, feat_dimK)
V: (batch, lengthK, feat_dimV)
k_mask: None or tensor, (batch, length2)
q_mask: None or tensor, (batch, length1)
output
------
O: tensor, (batch, length1, dimension2)
attn: tensor, (batch, length1, length2), attention matrix
k_mask[i] is a mask for the i-th key/value, k_mask[i, j]==True
indicates that K[i][j] and V[i][j] should be masked.
attention[i][:, j] should be zero
        q_mask[i] is a mask for the i-th query, q_mask[i, j]==True
indicates that output O[i][j] should be masked
"""
bsize = value.size(0)
k_len = key.size(1)
q_len = query.size(1)
if self.flag_causal and k_len != q_len:
print("Causal Attention, Q,V,K must have same length in time")
sys.exit(1)
# transform and split the input Q, K, V
def _trans_split(data_mat, trans_func, head):
bsize, length, dim = data_mat.shape
# (batch, length, feat_dim) -> (batch, length, feat_dimV)
# -> (batch, lengthK, num_head, feat_dimV / num_head)
tmp_mat = trans_func(data_mat).view(bsize, length, head, -1)
# -> ( num_head, batch, lengthK, feat_dimV / num_head)
tmp_mat = tmp_mat.permute(2, 0, 1, 3).contiguous()
# -> ( num_head * batch, lengthK, feat_dimV / num_head)
tmp_mat = tmp_mat.view(-1, length, tmp_mat.shape[-1])
return tmp_mat
value_mul = _trans_split(value, self.m_v_fc, self.num_head)
key_mul = _trans_split(key, self.m_k_fc, self.num_head)
query_mul = _trans_split(query, self.m_q_fc, self.num_head)
# duplicate masks to multi heads
if q_mask is not None:
q_mask_tmp = q_mask.repeat(self.num_head, 1)
else:
q_mask_tmp = None
if k_mask is not None:
k_mask_tmp = k_mask.repeat(self.num_head, 1)
else:
k_mask_tmp = None
# attention and sum
o_mul, attn = self.m_attn(query_mul, key_mul, value_mul,
q_mask_tmp, k_mask_tmp)
# recover it back
# ( num_head * batch, lengthQ, feat_dimV / num_head) ->
# ( num_head, batch, lengthQ, feat_dimV / num_head) ->
o_mul = o_mul.view(self.num_head, bsize, q_len, -1)
# -> ( batch, lengthQ, feat_dimV)
o_mat = o_mul.permute(1, 2, 0, 3).contiguous().view(bsize, q_len, -1)
# concatenate the input query and output of attention if necessary
if self.flag_cat_q:
# (batch, lengthQ, feat_dimQ + feat_dimV)
o_mat = torch.cat([o_mat, query], dim=-1)
# linear
o_mat = self.m_output(o_mat)
# dropout
if self.m_drop:
o_mat = self.m_drop(o_mat)
# residual & layer norm
if o_mat.shape[-1] == query.shape[-1]:
o_mat = o_mat + query
# layer normalize after
if not self.flag_norm_before:
o_mat = self.m_layernorm(o_mat)
return o_mat, attn
# ====================
# misc
# ====================
def position_encoding(n_pos, n_dim, padding_idx=None):
"""Position encoding in Transformer
input:
------
n_pos: int, pos, number of possible positions
n_dim: int, n_dim//2 = i, number of hidden dimensions
output:
------
sin_tab: np.array, (n_pos, n_dim)
sin_tab[n, 2i] = sin(n / 10000 ^ (2i / n_dim))
sin_tab[n, 2i+1] = cos(n / 10000 ^ (2i / n_dim))
Example:
data = position_encoding(1024, 512, 0)
"""
# make sure that n_dim is an even number
if n_dim % 2 > 0:
print("position_encoding: n_dim should be an even number")
sys.exit(1)
# create the table
sin_tab = np.zeros([n_pos, n_dim])
for idx in np.arange(n_dim // 2):
# period: 10000 ^ (2i / n_dim)
pd = np.power(10000, 2 * idx / n_dim)
# sin(n / 10000 ^ (2i / n_dim))
sin_tab[:, 2 * idx] = np.sin( np.arange(n_pos) / pd)
        # cos(n / 10000 ^ (2i / n_dim))
sin_tab[:, 2 * idx+1] = np.cos( np.arange(n_pos) / pd)
# remove the dummy positioning encoding
if padding_idx is not None:
sin_tab[padding_idx] = 0
return sin_tab
class FeedforwardBlock(torch_nn.Module):
"""Feedforward block in Transformer
"""
def __init__(self, feat_dim):
super(FeedforwardBlock, self).__init__()
self.m_block = torch_nn.Sequential(
torch_nn.Linear(feat_dim, feat_dim * 4),
torch_nn.ReLU(),
torch_nn.Linear(feat_dim * 4, feat_dim)
#torch_nn.Dropout(p=0.1)
)
self.m_layernorm = torch_nn.LayerNorm(feat_dim)
# initialization
torch_nn.init.xavier_uniform_(
self.m_block[0].weight, gain=torch_nn.init.calculate_gain('relu'))
torch_nn.init.xavier_uniform_(
self.m_block[2].weight, gain=torch_nn.init.calculate_gain('linear'))
return
def forward(self, feat):
""" out = FeedforwardBlock(feat)
input
-----
feat: tensor, (batch, length, feat_dim)
output
------
output: tensor, (batch, length, feat_dim)
"""
return self.m_layernorm(self.m_block(feat) + feat)
class FeedforwardBlockv2(torch_nn.Module):
"""Feedforward block in Transformer
"""
def __init__(self, feat_dim, dropout=0.0, flag_norm_before=False):
super(FeedforwardBlockv2, self).__init__()
self.m_block = torch_nn.Sequential(
torch_nn.Linear(feat_dim, feat_dim * 4),
torch_nn.ReLU(),
torch_nn.Dropout(p=dropout),
torch_nn.Linear(feat_dim * 4, feat_dim)
)
self.flag_norm_before = flag_norm_before
self.m_layernorm = torch_nn.LayerNorm(feat_dim)
# initialization
torch_nn.init.xavier_uniform_(
self.m_block[0].weight, gain=torch_nn.init.calculate_gain('relu'))
torch_nn.init.xavier_uniform_(
self.m_block[-1].weight, gain=torch_nn.init.calculate_gain('linear'))
return
def forward(self, feat):
""" out = FeedforwardBlock(feat)
input
-----
feat: tensor, (batch, length, feat_dim)
output
------
output: tensor, (batch, length, feat_dim)
"""
if not self.flag_norm_before:
return self.m_layernorm(self.m_block(feat) + feat)
else:
return self.m_block(feat) + feat
if __name__ == "__main__":
print("block_attention.py")
| 16,852 | 33.464213 | 81 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_wavenet.py | #!/usr/bin/env python
"""
model.py for WaveNet
version: 1
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import time
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_warn
import sandbox.block_nn as nii_nn
import sandbox.block_dist as nii_dist
import sandbox.util_dsp as nii_dsp
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
##############
#
class CondModule(torch_nn.Module):
""" Conditiona module: upsample and transform input features
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3):
""" CondModule(input_dim, output_dim, up_sample,
blstm_s=64, cnn_kernel_s=3)
Args
----
input_dim: int, input tensor should be (batchsize, len1, input_dim)
output_dim: int, output tensor will be (batchsize, len2, output_dim)
up_sample: int, up-sampling rate, len2 = len1 * up_sample
blstm_s: int, layer size of the Bi-LSTM layer
cnn_kernel_s: int, kernel size of the conv1d
"""
super(CondModule, self).__init__()
# configurations
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
# layers
self.l_blstm = nii_nn.BLSTMLayer(input_dim, self.blstm_s)
self.l_conv1d = nii_nn.Conv1dKeepLength(
self.blstm_s, output_dim, 1, self.cnn_kernel_s)
self.l_upsamp = nii_nn.UpSampleLayer(
self.output_dim, self.up_sample, True)
def forward(self, feature):
""" transformed_feat = forward(input_feature)
input
-----
feature: (batchsize, length, input_dim)
output
------
transformed_feat: tensor (batchsize, length*up_sample, out_dim)
"""
return self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
class WaveNetBlock(torch_nn.Module):
""" WaveNet block based on dilated-1D, gated-activation, and skip-connect.
Based on http://tonywangx.github.io/slide.html#misc CURRENNT WaveNet,
page 19-31.
"""
def __init__(self, input_dim, skip_ch_dim, gated_act_dim, cond_dim,
dilation_size, cnn_kernel_size=2, causal=True):
""" WaveNetBlock(input_dim, skip_ch_dim, gated_act_dim, cond_dim,
dilation_size, cnn_kernel_size = 2)
Args
----
input_dim: int, input tensor should be (batch-size, length, input_dim)
this is the dimension of residual channel
skip_ch_dim: int, tensors to be send to output blocks is in shape
(batch-size, length, skip_ch_dim)
gated_act_dim: int, tensors given by tanh(.) * sig(.) is in shape
(batch-size, length, gated_act_dim)
cond_dim: int, conditional feature (batchsize, length, cond_dim)
dilation_size: int, dilation size of the conv
cnn_kernel_size: int, kernel size of dilated conv1d (default, 2)
causal: bool, whether this block is used in AR model (default, True)
Note that causal==False will raise error if step-by-step generation
is conducted by inference(input_feat, cond_feat, step_idx) with
step_idx != None.
For causal==False, always use inference(input_feat, cond_feat, None)
"""
super(WaveNetBlock, self).__init__()
#####
# configurations
#####
# input tensor: (batchsize, length, self.input_dim)
self.input_dim = input_dim
# tensor sent to next WaveNetBlock, same shape as input
self.res_ch_dim = input_dim
#
self.skip_ch_dim = skip_ch_dim
self.gated_act_dim = gated_act_dim
self.cond_dim = cond_dim
self.dilation_size = dilation_size
self.conv_kernel_s = cnn_kernel_size
######
# layers
######
# dilated convolution
self.l_conv1d = nii_nn.Conv1dForARModel(
self.input_dim, self.gated_act_dim * 2, self.dilation_size,
self.conv_kernel_s, tanh=False)
# condition feature transform
self.l_cond_trans = torch_nn.Sequential(
torch_nn.Linear(self.cond_dim, self.gated_act_dim*2),
torch_nn.LeakyReLU())
# transformation after gated act
self.l_res_trans = torch_nn.Linear(self.gated_act_dim, self.res_ch_dim)
# transformation for skip channels
self.l_skip_trans = torch_nn.Linear(self.res_ch_dim, self.skip_ch_dim)
return
def _forward(self, input_feat, cond_feat, step_idx=None):
""" res_feat, skip_feat = forward(input_feat, cond_feat)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
        step_idx: None: training phase
int: idx of the time step during step-by-step generation
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
"""
# dilated 1d convolution
hid = self.l_conv1d(input_feat, step_idx)
# transform and add condition feature
hid = hid + self.l_cond_trans(cond_feat)
# gated activation
hid = torch.tanh(hid[:, :, 0:self.gated_act_dim]) \
* torch.sigmoid(hid[:, :, self.gated_act_dim:])
# res-channel transform
res_feat = self.l_res_trans(hid) + input_feat
# skip-channel transform
skip_feat = self.l_skip_trans(res_feat)
# done
return res_feat, skip_feat
def forward(self, input_feat, cond_feat):
""" res_feat, skip_feat = forward(input_feat, cond_feat)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
Note that input_dim refers to the residual channel dimension.
Thus, input_feat should be embedding(audio), not audio.
"""
return self._forward(input_feat, cond_feat)
def inference(self, input_feat, cond_feat, step_idx):
""" res_feat, skip_feat = inference(input_feat, cond_feat, step_idx)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
step_idx: int, idx of the time step during step-by-step generation
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
"""
return self._forward(input_feat, cond_feat, step_idx)
class WaveNetBlock_v2(torch_nn.Module):
""" WaveNet block based on dilated-1D, gated-activation, and skip-connect.
Based on http://tonywangx.github.io/slide.html#misc CURRENNT WaveNet
(page 19-31) and WN in Pytorch WaveGlow.
The difference from WaveNetBlock
1. weight_norm
2. skip_channel is computed from gated-activation's output, not res_channel
"""
def __init__(self, input_dim, skip_ch_dim, gated_act_dim, cond_dim,
dilation_size, cnn_kernel_size=2, causal=True):
""" WaveNetBlock(input_dim, skip_ch_dim, gated_act_dim, cond_dim,
dilation_size, cnn_kernel_size = 2)
Args
----
input_dim: int, input tensor should be (batch-size, length, input_dim)
skip_ch_dim: int, tensors to be send to output blocks is in shape
(batch-size, length, skip_ch_dim)
gated_act_dim: int, tensors given by tanh(.) * sig(.) is in shape
(batch-size, length, gated_act_dim)
cond_dim: int, conditional feature (batchsize, length, cond_dim)
dilation_size: int, dilation size of the conv
cnn_kernel_size: int, kernel size of dilated conv1d (default, 2)
causal: bool, whether this block is used for AR model (default, True)
Note that when causal == False, step-by-step generation using step_index
will raise error.
"""
super(WaveNetBlock_v2, self).__init__()
#####
# configurations
#####
# input tensor: (batchsize, length, self.input_dim)
self.input_dim = input_dim
# tensor sent to next WaveNetBlock, same shape as input
self.res_ch_dim = input_dim
#
self.skip_ch_dim = skip_ch_dim
self.gated_act_dim = gated_act_dim
self.cond_dim = cond_dim
self.dilation_size = dilation_size
self.conv_kernel_s = cnn_kernel_size
######
# layers
######
# dilated convolution
tmp_layer = nii_nn.Conv1dForARModel(
self.input_dim, self.gated_act_dim * 2, self.dilation_size,
self.conv_kernel_s, tanh=False, causal = causal)
self.l_conv1d = torch.nn.utils.weight_norm(tmp_layer, name='weight')
# condition feature transform
tmp_layer = torch_nn.Linear(self.cond_dim, self.gated_act_dim*2)
self.l_cond_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight')
# transformation after gated act
tmp_layer = torch_nn.Linear(self.gated_act_dim, self.res_ch_dim)
self.l_res_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight')
# transformation for skip channels
#tmp_layer = torch_nn.Linear(self.res_ch_dim, self.skip_ch_dim)
tmp_layer = torch_nn.Linear(self.gated_act_dim, self.skip_ch_dim)
self.l_skip_trans = torch.nn.utils.weight_norm(tmp_layer, name='weight')
return
def _forward(self, input_feat, cond_feat, step_idx=None):
""" res_feat, skip_feat = forward(input_feat, cond_feat)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
        step_idx: None: training phase
int: idx of the time step during step-by-step generation
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
"""
# dilated 1d convolution, add condition feature
hid = self.l_conv1d(input_feat, step_idx) + self.l_cond_trans(cond_feat)
# gated activation
hid = torch.tanh(hid[:, :, 0:self.gated_act_dim]) \
* torch.sigmoid(hid[:, :, self.gated_act_dim:])
# res-channel transform
res_feat = self.l_res_trans(hid) + input_feat
# skip-channel transform
        # if we use skip_feat = self.l_skip_trans(res_feat), this causes
        # exploding outputs when skip_feat is used to produce the scale and
        # bias of an affine transformation (e.g., in WaveGlow)
skip_feat = self.l_skip_trans(hid)
# done
return res_feat, skip_feat
def forward(self, input_feat, cond_feat):
""" res_feat, skip_feat = forward(input_feat, cond_feat)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
"""
return self._forward(input_feat, cond_feat)
def inference(self, input_feat, cond_feat, step_idx):
""" res_feat, skip_feat = inference(input_feat, cond_feat, step_idx)
input
-----
input_feat: input feature tensor, (batchsize, length, input_dim)
cond_feat: condition feature tensor, (batchsize, length, cond_dim)
step_idx: int, idx of the time step during step-by-step generation
output
------
res_feat: residual channel feat tensor, (batchsize, length, input_dim)
skip_feat: skip channel feat tensor, , (batchsize, length, skip_dim)
"""
return self._forward(input_feat, cond_feat, step_idx)
class OutputBlock(torch_nn.Module):
"""Output block to produce waveform distribution given skip-channel features
"""
def __init__(self, input_dim, output_dim, hid_dim=512):
""" OutputBlock(input_dim, output_dim)
Args
----
input_dim: int, input tensor should be (batchsize, length, input_dim)
it should be the sum of skip-channel features
output_dim: int, output tensor will be (batchsize, length, output_dim)
hid_dim: int, dimension of intermediate linear layers
"""
super(OutputBlock, self).__init__()
# config
self.input_dim = input_dim
self.output_dim = output_dim
self.hid_dim = hid_dim
# transformation layers before softmax
self.l_trans = torch_nn.Sequential(
torch_nn.Linear(self.input_dim, self.hid_dim // 2),
torch_nn.LeakyReLU(),
torch_nn.Linear(self.hid_dim // 2, self.hid_dim),
torch_nn.LeakyReLU(),
torch_nn.Linear(self.hid_dim, self.output_dim))
# output distribution
self.l_dist = nii_dist.DistCategorical(self.output_dim)
return
def forward(self, input_feat, target):
"""loss = forward(input_feat, target)
This method is supposed to be used to compute the loss
input
-----
input_feat: tensor in shape (batchsize, length, input_dim)
target: waveform tensor in shape (batchsize, length, dim=1)
output
------
loss: tensor or scalar
"""
# transform hidden feature vector to logit
tmp_logit = self.l_trans(input_feat)
# calculate the likelihood
return self.l_dist(tmp_logit, target)
def inference(self, input_feat):
"""output = inference(input_feat)
input
-----
input_feat: tensor in shape (batchsize, length, input_dim)
output
------
target: waveform tensor in shape (batchsize, length, dim=1)
"""
# transform hidden feature vector to logit
tmp_logit = self.l_trans(input_feat)
return self.l_dist.inference(tmp_logit)
################################
## Example of WaveNet definition
################################
class WaveNet_v1(torch_nn.Module):
""" Model definition of WaveNet
Example definition of WaveNet, version 1
"""
def __init__(self, in_dim, up_sample_rate, num_bits = 10, wnblock_ver=1,
pre_emphasis=True):
""" WaveNet(in_dim, up_sample_rate, num_bits=10, wnblock_ver=1,
pre_emphasis=False)
Args
----
in_dim: int, dimension of condition feature (batch, length, in_dim)
up_sample_rate, int, condition feature will be up-sampled by
using this rate
num_bits: int, number of bits for mu-law companding, default 10
wnblock_ver: int, version of the WaveNet Block, default 1
wnblock_ver = 1 uses WaveNetBlock
wnblock_ver = 2 uses WaveNetBlock_v2
pre_emphasis: bool, whether use pre-emphasis on the target waveform
up_sample_rate can be calculated using frame_shift of condition feature
and waveform sampling rate. For example, 16kHz waveform, condition
feature (e.g., Mel-spectrogram) extracted using 5ms frame shift, then
up_sample_rate = 16000 * 0.005 = 80. In other words, every frame will
be replicated 80 times.
"""
super(WaveNet_v1, self).__init__()
#################
## model config
#################
# number of bits for mu-law
self.num_bits = num_bits
self.num_classes = 2 ** self.num_bits
# up-sample rate
self.up_sample = up_sample_rate
# wavenet blocks
# residual channel dim
self.res_ch_dim = 64
# gated activate dim
self.gate_act_dim = 64
# condition feature dim
self.cond_dim = 64
# skip channel dim
self.skip_ch_dim = 256
# dilation size
self.dilations = [2 ** (x % 10) for x in range(30)]
# input dimension of (conditional feature)
self.input_dim = in_dim
# version of wavenet block
self.wnblock_ver = wnblock_ver
# whether pre-emphasis
self.pre_emphasis = pre_emphasis
###############
## network definition
###############
# condition module
self.l_cond = CondModule(self.input_dim, self.cond_dim, self.up_sample)
# waveform embedding layer
self.l_wav_emb = torch_nn.Embedding(self.num_classes, self.res_ch_dim)
# dilated convolution layers
tmp_wav_blocks = []
for dilation in self.dilations:
if self.wnblock_ver == 2:
tmp_wav_blocks.append(
WaveNetBlock_v2(
self.res_ch_dim, self.skip_ch_dim, self.gate_act_dim,
self.cond_dim, dilation))
else:
tmp_wav_blocks.append(
WaveNetBlock(
self.res_ch_dim, self.skip_ch_dim, self.gate_act_dim,
self.cond_dim, dilation))
self.l_wavenet_blocks = torch_nn.ModuleList(tmp_wav_blocks)
# output block
self.l_output = OutputBlock(self.skip_ch_dim, self.num_classes)
# done
return
def _waveform_encode_target(self, target_wav):
return nii_dsp.mulaw_encode(target_wav, self.num_classes)
def _waveform_decode_target(self, gen_wav):
return nii_dsp.mulaw_decode(gen_wav, self.num_classes)
def forward(self, input_feat, wav):
"""loss = forward(self, input_feat, wav)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
wav: tensor, target waveform (batchsize, length2, 1)
             it should be a raw waveform, float valued, between (-1, 1)
it will be companded using mu-law automatically
output
------
loss: tensor / scalar
        Note: the returned loss can be directly used as the loss value;
              no need to wrap it in a separate Loss()
"""
# step1. prepare the target waveform and feedback waveform
# do mu-law companding
# shifting by 1 time step for feedback waveform
with torch.no_grad():
if self.pre_emphasis:
wav[:, 1:, :] = wav[:, 1:, :] - 0.97 * wav[:, 0:-1, :]
wav = wav.clamp(-1, 1)
# mu-law companding (int values)
            # note that _waveform_encode_target will produce int values
target_wav = self._waveform_encode_target(wav)
# feedback wav
fb_wav = torch.zeros(
target_wav.shape, device=wav.device, dtype=target_wav.dtype)
fb_wav[:, 1:] = target_wav[:, :-1]
# step2. condition feature
hid_cond = self.l_cond(input_feat)
# step3. feedback waveform embedding
hid_wav_emb = self.l_wav_emb(fb_wav.squeeze(-1))
# step4. stacks of wavenet
# buffer to save skip-channel features
skip_ch_feat = torch.zeros(
[target_wav.shape[0],target_wav.shape[1], self.skip_ch_dim],
device=input_feat.device, dtype=input_feat.dtype)
res_ch_feat = hid_wav_emb
for l_wavblock in self.l_wavenet_blocks:
res_ch_feat, tmp_skip_ch_feat = l_wavblock(res_ch_feat, hid_cond)
skip_ch_feat += tmp_skip_ch_feat
# step5. get output
likelihood = self.l_output(skip_ch_feat, target_wav)
return likelihood
def inference(self, input_feat):
"""wav = inference(mels)
input
-----
input_feat: tensor, input features (batchsize, length1, input_dim)
output
------
wav: tensor, target waveform (batchsize, length2, 1)
Note: length2 will be = length1 * self.up_sample
"""
# prepare
batchsize = input_feat.shape[0]
wavlength = input_feat.shape[1] * self.up_sample
time_idx_marker = wavlength // 10
#
# step1. condition features
hid_cond = self.l_cond(input_feat)
# step2. do computation step-by-step
        # initialize the buffers
gen_wav_buf = torch.zeros(
[batchsize, wavlength, 1],
dtype=input_feat.dtype, device=input_feat.device)
fb_wav_buf = torch.zeros(
[batchsize, 1, 1],
dtype=input_feat.dtype, device=input_feat.device)
skip_ch_feat = torch.zeros(
[batchsize, 1, self.skip_ch_dim],
dtype=input_feat.dtype, device=input_feat.device)
# loop over all time steps
print("Total time steps: {:d}. Progress: ".format(wavlength),
end=' ', flush=True)
for time_idx in range(wavlength):
# show messages
if time_idx % 500 == 1:
print(time_idx, end=' ', flush=True)
# feedback
if time_idx > 0:
fb_wav_buf = gen_wav_buf[:, time_idx-1:time_idx, :]
# initialize skip
skip_ch_feat *= 0
# embedding
hid_wav_emb = self.l_wav_emb(fb_wav_buf.squeeze(-1).to(torch.int64))
# condition feature for current time step
# for other time steps, intermediate feat is saved by wave blocks
hid_cond_tmp = hid_cond[:, time_idx:time_idx+1, :]
# loop over wavblocks
res_ch_feat = hid_wav_emb
for l_wavblock in self.l_wavenet_blocks:
res_ch_feat, tmp_skip_ch_feat = l_wavblock.inference(
res_ch_feat, hid_cond_tmp, time_idx)
skip_ch_feat += tmp_skip_ch_feat
# draw sample
drawn_sample = self.l_output.inference(skip_ch_feat)
gen_wav_buf[:, time_idx:time_idx+1, :] = drawn_sample
# decode mu-law
wave = self._waveform_decode_target(gen_wav_buf)
# de-emphasis if necessary
if self.pre_emphasis:
for idx in range(wave.shape[1] - 1):
wave[:, idx+1, :] = wave[:, idx+1, :] + 0.97 * wave[:, idx, :]
return wave
if __name__ == "__main__":
print("Definition of model")
| 23,685 | 34.887879 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/eval_asvspoof.py | #!/usr/bin/env python
"""
Functions for evaluation - asvspoof and related binary classification tasks
Python Function from min tDCF on asvspoof.org
All functions before tDCF_wrapper are licensed by Creative Commons.
----- License ----
This work is licensed under the Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International
License. To view a copy of this license, visit
http://creativecommons.org/licenses/by-nc-sa/4.0/
or send a letter to
Creative Commons, 444 Castro Street, Suite 900,
Mountain View, California, 94041, USA.
------------------
"""
from __future__ import print_function
import os
import sys
import numpy as np
import core_scripts.data_io.io_tools as nii_io
class CustomDict:
def __init__(self, missing_value=-1):
self.databuf = {}
self.misval = missing_value
return
def __setitem__(self, key, value):
self.databuf[key] = value
return
def __getitem__(self, key):
if key in self.databuf:
return self.databuf[key]
else:
return self.misval
def keys(self):
return self.databuf.keys()
def protocol_parse_asvspoof2019(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = CustomDict()
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
else:
print("Cannot load {:s}".format(protocol_filepath))
print("Use an empty dictionary")
return data_buffer
def protocol_parse_general(protocol_filepaths, sep=' '):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
    protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
    output:
    -------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = CustomDict()
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if cols[-1] == 'bonafide':
data_buffer[cols[1]] = 1
else:
data_buffer[cols[1]] = 0
return data_buffer
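# Usage sketch (the file path below is only for illustration):
# protocol = protocol_parse_general(['/path/to/protocol.txt'])
# protocol['LA_E_5849185'] -> 1 for bonafide, 0 for spoof, and -1 (the
# CustomDict default) for a trial that is not listed in the protocol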
def protocol_parse_attack_label_asvspoof2019(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
output:
-------
      data_buffer: dic, data_buffer[filename] -> attack type
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-2] == '-':
data_buffer[row[1]] = 'bonafide'
else:
data_buffer[row[1]] = row[-2]
return data_buffer
def obtain_asv_error_rates(tar_asv, non_asv, spoof_asv, asv_threshold):
# False alarm and miss rates for ASV
Pfa_asv = sum(non_asv >= asv_threshold) / non_asv.size
Pmiss_asv = sum(tar_asv < asv_threshold) / tar_asv.size
# Rate of rejecting spoofs in ASV
if spoof_asv.size == 0:
Pmiss_spoof_asv = None
Pfa_spoof_asv = None
else:
Pmiss_spoof_asv = np.sum(spoof_asv < asv_threshold) / spoof_asv.size
Pfa_spoof_asv = np.sum(spoof_asv >= asv_threshold) / spoof_asv.size
return Pfa_asv, Pmiss_asv, Pmiss_spoof_asv, Pfa_spoof_asv
def compute_det_curve(target_scores, nontarget_scores):
n_scores = target_scores.size + nontarget_scores.size
all_scores = np.concatenate((target_scores, nontarget_scores))
labels = np.concatenate((np.ones(target_scores.size),
np.zeros(nontarget_scores.size)))
# Sort labels based on scores
indices = np.argsort(all_scores, kind='mergesort')
labels = labels[indices]
# Compute false rejection and false acceptance rates
tar_trial_sums = np.cumsum(labels)
nontarget_trial_sums = (nontarget_scores.size -
(np.arange(1, n_scores + 1) - tar_trial_sums))
frr = np.concatenate((np.atleast_1d(0), tar_trial_sums/target_scores.size))
# false rejection rates
far = np.concatenate((np.atleast_1d(1),
nontarget_trial_sums / nontarget_scores.size))
# false acceptance rates
thresholds = np.concatenate((np.atleast_1d(all_scores[indices[0]] - 0.001),
all_scores[indices]))
# Thresholds are the sorted scores
return frr, far, thresholds
def compute_eer(target_scores, nontarget_scores):
""" Returns equal error rate (EER) and the corresponding threshold. """
frr, far, thresholds = compute_det_curve(target_scores, nontarget_scores)
abs_diffs = np.abs(frr - far)
min_index = np.argmin(abs_diffs)
eer = np.mean((frr[min_index], far[min_index]))
return eer, thresholds[min_index]
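# Example (synthetic scores, illustrative only): with well-separated target and
# non-target score distributions, compute_eer should return an EER close to 0:
#   eer, thr = compute_eer(np.array([2.0, 2.5, 3.0]), np.array([-3.0, -2.5, -2.0]))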
def compute_tDCF_legacy(
bonafide_score_cm, spoof_score_cm,
Pfa_asv, Pmiss_asv, Pmiss_spoof_asv, cost_model, print_cost=False):
"""
Compute Tandem Detection Cost Function (t-DCF) [1] for a fixed ASV system.
In brief, t-DCF returns a detection cost of a cascaded system of this form,
Speech waveform -> [CM] -> [ASV] -> decision
where CM stands for countermeasure and ASV for automatic speaker
    verification. The CM is therefore used as a 'gate' to decide whether or
not the input speech sample should be passed onwards to the ASV system.
    Generally, both CM and ASV can make detection errors. Not all those errors
    are necessarily equally costly, and not all types of users are necessarily
    equally likely. The tandem t-DCF gives a principled way to compare
different spoofing countermeasures under a detection cost function
framework that takes that information into account.
INPUTS:
bonafide_score_cm A vector of POSITIVE CLASS (bona fide or human)
detection scores obtained by executing a spoofing
                        countermeasure (CM) on some positive evaluation trials.
                        Each trial represents a bona fide case.
spoof_score_cm A vector of NEGATIVE CLASS (spoofing attack)
detection scores obtained by executing a spoofing
CM on some negative evaluation trials.
Pfa_asv False alarm (false acceptance) rate of the ASV
system that is evaluated in tandem with the CM.
Assumed to be in fractions, not percentages.
Pmiss_asv Miss (false rejection) rate of the ASV system that
is evaluated in tandem with the spoofing CM.
Assumed to be in fractions, not percentages.
Pmiss_spoof_asv Miss rate of spoof samples of the ASV system that
is evaluated in tandem with the spoofing CM. That
is, the fraction of spoof samples that were
rejected by the ASV system.
cost_model A struct that contains the parameters of t-DCF,
with the following fields.
Ptar Prior probability of target speaker.
Pnon Prior probability of nontarget speaker
(zero-effort impostor)
                            Pspoof      Prior probability of spoofing attack.
Cmiss_asv Cost of ASV falsely rejecting target.
Cfa_asv Cost of ASV falsely accepting nontarget.
Cmiss_cm Cost of CM falsely rejecting target.
Cfa_cm Cost of CM falsely accepting spoof.
print_cost Print a summary of the cost parameters and the
implied t-DCF cost function?
OUTPUTS:
tDCF_norm Normalized t-DCF curve across the different CM
system operating points; see [2] for more details.
Normalized t-DCF > 1 indicates a useless
countermeasure (as the tandem system would do
better without it). min(tDCF_norm) will be the
minimum t-DCF used in ASVspoof 2019 [2].
CM_thresholds Vector of same size as tDCF_norm corresponding to
the CM threshold (operating point).
NOTE:
o In relative terms, higher detection scores values are assumed to
indicate stronger support for the bona fide hypothesis.
o You should provide real-valued soft scores, NOT hard decisions. The
recommendation is that the scores are log-likelihood ratios (LLRs)
from a bonafide-vs-spoof hypothesis based on some statistical model.
This, however, is NOT required. The scores can have arbitrary range
and scaling.
o Pfa_asv, Pmiss_asv, Pmiss_spoof_asv are in fractions, not percentages.
References:
[1] T. Kinnunen, K.-A. Lee, H. Delgado, N. Evans, M. Todisco,
M. Sahidullah, J. Yamagishi, D.A. Reynolds: "t-DCF: a Detection
Cost Function for the Tandem Assessment of Spoofing Countermeasures
and Automatic Speaker Verification", Proc. Odyssey 2018: the
Speaker and Language Recognition Workshop, pp. 312--319,
Les Sables d'Olonne,
France, June 2018
https://www.isca-speech.org/archive/Odyssey_2018/pdfs/68.pdf)
[2] ASVspoof 2019 challenge evaluation plan
TODO: <add link>
"""
# Sanity check of cost parameters
if cost_model['Cfa_asv'] < 0 or cost_model['Cmiss_asv'] < 0 or \
cost_model['Cfa_cm'] < 0 or cost_model['Cmiss_cm'] < 0:
print('WARNING: Usually the cost values should be positive!')
if cost_model['Ptar'] < 0 or cost_model['Pnon'] < 0 or \
cost_model['Pspoof'] < 0 or \
np.abs(cost_model['Ptar'] + cost_model['Pnon'] + cost_model['Pspoof'] - 1) > 1e-10:
sys.exit('ERROR: Your prior probabilities should be positive and sum up to one.')
# Unless we evaluate worst-case model, we need to have some spoof tests against asv
if Pmiss_spoof_asv is None:
sys.exit('ERROR: you should provide miss rate of spoof tests against your ASV system.')
# Sanity check of scores
combined_scores = np.concatenate((bonafide_score_cm, spoof_score_cm))
if np.isnan(combined_scores).any() or np.isinf(combined_scores).any():
sys.exit('ERROR: Your scores contain nan or inf.')
# Sanity check that inputs are scores and not decisions
n_uniq = np.unique(combined_scores).size
if n_uniq < 3:
sys.exit('ERROR: You should provide soft CM scores - not binary decisions')
# Obtain miss and false alarm rates of CM
Pmiss_cm, Pfa_cm, CM_thresholds = compute_det_curve(bonafide_score_cm, spoof_score_cm)
# Constants - see ASVspoof 2019 evaluation plan
C1 = cost_model['Ptar'] * (cost_model['Cmiss_cm'] - cost_model['Cmiss_asv'] * Pmiss_asv) - \
cost_model['Pnon'] * cost_model['Cfa_asv'] * Pfa_asv
C2 = cost_model['Cfa_cm'] * cost_model['Pspoof'] * (1 - Pmiss_spoof_asv)
# Sanity check of the weights
if C1 < 0 or C2 < 0:
        sys.exit('You should never see this error but I cannot evaluate tDCF with negative weights - please check whether your ASV error rates are correctly computed?')
# Obtain t-DCF curve for all thresholds
tDCF = C1 * Pmiss_cm + C2 * Pfa_cm
# Normalized t-DCF
tDCF_norm = tDCF / np.minimum(C1, C2)
# Everything should be fine if reaching here.
if print_cost:
print('t-DCF evaluation from [Nbona={}, Nspoof={}] trials\n'.format(bonafide_score_cm.size, spoof_score_cm.size))
print('t-DCF MODEL')
print(' Ptar = {:8.5f} (Prior probability of target user)'.format(cost_model['Ptar']))
print(' Pnon = {:8.5f} (Prior probability of nontarget user)'.format(cost_model['Pnon']))
print(' Pspoof = {:8.5f} (Prior probability of spoofing attack)'.format(cost_model['Pspoof']))
print(' Cfa_asv = {:8.5f} (Cost of ASV falsely accepting a nontarget)'.format(cost_model['Cfa_asv']))
print(' Cmiss_asv = {:8.5f} (Cost of ASV falsely rejecting target speaker)'.format(cost_model['Cmiss_asv']))
print(' Cfa_cm = {:8.5f} (Cost of CM falsely passing a spoof to ASV system)'.format(cost_model['Cfa_cm']))
print(' Cmiss_cm = {:8.5f} (Cost of CM falsely blocking target utterance which never reaches ASV)'.format(cost_model['Cmiss_cm']))
print('\n Implied normalized t-DCF function (depends on t-DCF parameters and ASV errors), s=CM threshold)')
if C2 == np.minimum(C1, C2):
print(' tDCF_norm(s) = {:8.5f} x Pmiss_cm(s) + Pfa_cm(s)\n'.format(C1 / C2))
else:
print(' tDCF_norm(s) = Pmiss_cm(s) + {:8.5f} x Pfa_cm(s)\n'.format(C2 / C1))
return tDCF_norm, CM_thresholds
def compute_tDCF(
bonafide_score_cm, spoof_score_cm,
Pfa_asv, Pmiss_asv, Pfa_spoof_asv, cost_model, print_cost):
"""
Compute Tandem Detection Cost Function (t-DCF) [1] for a fixed ASV system.
In brief, t-DCF returns a detection cost of a cascaded system of this form,
Speech waveform -> [CM] -> [ASV] -> decision
where CM stands for countermeasure and ASV for automatic speaker
    verification. The CM is therefore used as a 'gate' to decide whether or
not the input speech sample should be passed onwards to the ASV system.
    Generally, both CM and ASV can make detection errors. Not all those errors
    are necessarily equally costly, and not all types of users are necessarily
    equally likely. The tandem t-DCF gives a principled way to compare
different spoofing countermeasures under a detection cost function
framework that takes that information into account.
INPUTS:
bonafide_score_cm A vector of POSITIVE CLASS (bona fide or human)
detection scores obtained by executing a spoofing
countermeasure (CM) on some positive evaluation trials.
                        Each trial represents a bona fide case.
spoof_score_cm A vector of NEGATIVE CLASS (spoofing attack)
detection scores obtained by executing a spoofing
CM on some negative evaluation trials.
Pfa_asv False alarm (false acceptance) rate of the ASV
system that is evaluated in tandem with the CM.
Assumed to be in fractions, not percentages.
Pmiss_asv Miss (false rejection) rate of the ASV system that
is evaluated in tandem with the spoofing CM.
Assumed to be in fractions, not percentages.
      Pfa_spoof_asv       False alarm rate of spoof samples of the ASV system
                          that is evaluated in tandem with the spoofing CM.
                          That is, the fraction of spoof samples that were
                          accepted by the ASV system.
cost_model A struct that contains the parameters of t-DCF,
with the following fields.
Ptar Prior probability of target speaker.
Pnon Prior probability of nontarget speaker (zero-effort impostor)
          Pspoof          Prior probability of spoofing attack.
Cmiss Cost of tandem system falsely rejecting target speaker.
Cfa Cost of tandem system falsely accepting nontarget speaker.
Cfa_spoof Cost of tandem system falsely accepting spoof.
print_cost Print a summary of the cost parameters and the
implied t-DCF cost function?
OUTPUTS:
tDCF_norm Normalized t-DCF curve across the different CM
system operating points; see [2] for more details.
Normalized t-DCF > 1 indicates a useless
countermeasure (as the tandem system would do
better without it). min(tDCF_norm) will be the
minimum t-DCF used in ASVspoof 2019 [2].
CM_thresholds Vector of same size as tDCF_norm corresponding to
the CM threshold (operating point).
NOTE:
o In relative terms, higher detection scores values are assumed to
indicate stronger support for the bona fide hypothesis.
o You should provide real-valued soft scores, NOT hard decisions. The
recommendation is that the scores are log-likelihood ratios (LLRs)
from a bonafide-vs-spoof hypothesis based on some statistical model.
This, however, is NOT required. The scores can have arbitrary range
and scaling.
      o Pfa_asv, Pmiss_asv, Pfa_spoof_asv are in fractions, not percentages.
References:
[1] T. Kinnunen, H. Delgado, N. Evans,K.-A. Lee, V. Vestman,
A. Nautsch, M. Todisco, X. Wang, M. Sahidullah, J. Yamagishi,
and D.-A. Reynolds, "Tandem Assessment of Spoofing Countermeasures
and Automatic Speaker Verification: Fundamentals," IEEE/ACM Transaction on
Audio, Speech and Language Processing (TASLP).
[2] ASVspoof 2019 challenge evaluation plan
https://www.asvspoof.org/asvspoof2019/asvspoof2019_evaluation_plan.pdf
"""
# Sanity check of cost parameters
    if cost_model['Cfa'] < 0 or cost_model['Cmiss'] < 0 or \
        cost_model['Cfa_spoof'] < 0:
print('WARNING: Usually the cost values should be positive!')
if cost_model['Ptar'] < 0 or cost_model['Pnon'] < 0 or cost_model['Pspoof'] < 0 or \
np.abs(cost_model['Ptar'] + cost_model['Pnon'] + cost_model['Pspoof'] - 1) > 1e-10:
sys.exit('ERROR: Your prior probabilities should be positive and sum up to one.')
# Unless we evaluate worst-case model, we need to have some spoof tests against asv
if Pfa_spoof_asv is None:
sys.exit('ERROR: you should provide false alarm rate of spoof tests against your ASV system.')
# Sanity check of scores
combined_scores = np.concatenate((bonafide_score_cm, spoof_score_cm))
if np.isnan(combined_scores).any() or np.isinf(combined_scores).any():
sys.exit('ERROR: Your scores contain nan or inf.')
# Sanity check that inputs are scores and not decisions
n_uniq = np.unique(combined_scores).size
if n_uniq < 3:
sys.exit('ERROR: You should provide soft CM scores - not binary decisions')
# Obtain miss and false alarm rates of CM
Pmiss_cm, Pfa_cm, CM_thresholds = compute_det_curve(bonafide_score_cm, spoof_score_cm)
# Constants - see ASVspoof 2019 evaluation plan
C0 = cost_model['Ptar'] * cost_model['Cmiss'] * Pmiss_asv + cost_model['Pnon']*cost_model['Cfa']*Pfa_asv
C1 = cost_model['Ptar'] * cost_model['Cmiss'] - (cost_model['Ptar'] * cost_model['Cmiss'] * Pmiss_asv + cost_model['Pnon'] * cost_model['Cfa'] * Pfa_asv)
    C2 = cost_model['Pspoof'] * cost_model['Cfa_spoof'] * Pfa_spoof_asv
# Sanity check of the weights
if C0 < 0 or C1 < 0 or C2 < 0:
        sys.exit('You should never see this error but I cannot evaluate tDCF with negative weights - please check whether your ASV error rates are correctly computed?')
# Obtain t-DCF curve for all thresholds
tDCF = C0 + C1 * Pmiss_cm + C2 * Pfa_cm
# Obtain default t-DCF
tDCF_default = C0 + np.minimum(C1, C2)
# Normalized t-DCF
tDCF_norm = tDCF / tDCF_default
# Everything should be fine if reaching here.
if print_cost:
print('t-DCF evaluation from [Nbona={}, Nspoof={}] trials\n'.format(bonafide_score_cm.size, spoof_score_cm.size))
print('t-DCF MODEL')
print(' Ptar = {:8.5f} (Prior probability of target user)'.format(cost_model['Ptar']))
print(' Pnon = {:8.5f} (Prior probability of nontarget user)'.format(cost_model['Pnon']))
print(' Pspoof = {:8.5f} (Prior probability of spoofing attack)'.format(cost_model['Pspoof']))
print(' Cfa = {:8.5f} (Cost of tandem system falsely accepting a nontarget)'.format(cost_model['Cfa']))
print(' Cmiss = {:8.5f} (Cost of tandem system falsely rejecting target speaker)'.format(cost_model['Cmiss']))
        print('   Cfa_spoof   = {:8.5f} (Cost of tandem system falsely accepting a spoof)'.format(cost_model['Cfa_spoof']))
print('\n Implied normalized t-DCF function (depends on t-DCF parameters and ASV errors), t_CM=CM threshold)')
print(' tDCF_norm(t_CM) = {:8.5f} + {:8.5f} x Pmiss_cm(t_CM) + {:8.5f} x Pfa_cm(t_CM)\n'.format(C0/tDCF_default, C1/tDCF_default, C2/tDCF_default))
print(' * The optimum value is given by the first term (0.06273). This is the normalized t-DCF obtained with an error-free CM system.')
print(' * The minimum normalized cost (minimum over all possible thresholds) is always <= 1.00.')
print('')
return tDCF_norm, CM_thresholds
def tDCF_wrapper(bonafide_cm_scores, spoof_cm_scores,
tar_asv_scores=None, non_asv_scores=None,
spoof_asv_scores=None,
flag_verbose=False, flag_legacy=True):
"""
mintDCF, eer, eer_thre = tDCF_wrapper(bonafide_cm_scores, spoof_cm_scores,
tar_asv_scores=None, non_asv_scores=None,
spoof_asv_scores=None, flag_verbose=False, flag_legacy=True)
input
-----
bonafide_cm_scores: np.array of bona fide scores
spoof_cm_scores: np.array of spoof scores
tar_asv_scores: np.array of ASV target scores, or None
non_asv_scores: np.array of ASV non-target scores, or None
spoof_asv_scores: np.array of ASV spoof trial scores, or None,
flag_verbose: print detailed messages
flag_legacy: True: use legacy min-tDCF in ASVspoof2019
False: use min-tDCF revised
output
------
mintDCF: scalar, value of min-tDCF
eer: scalar, value of EER
eer_thre: scalar, value of threshold corresponding to EER
"""
if flag_legacy:
Pspoof = 0.05
cost_model = {
'Pspoof': Pspoof, # Prior probability of a spoofing attack
'Ptar': (1 - Pspoof) * 0.99, # Prior probability of target speaker
'Pnon': (1 - Pspoof) * 0.01, # Prior probability of nontarget speaker
'Cmiss_asv': 1, # Cost of ASV system falsely rejecting target speaker
'Cfa_asv': 10, # Cost of ASV system falsely accepting nontarget speaker
'Cmiss_cm': 1, # Cost of CM system falsely rejecting target speaker
'Cfa_cm': 10, # Cost of CM system falsely accepting spoof
}
else:
Pspoof = 0.05
cost_model = {
'Pspoof': Pspoof, # Prior probability of a spoofing attack
'Ptar': (1 - Pspoof) * 0.99, # Prior probability of target speaker
'Pnon': (1 - Pspoof) * 0.01, # Prior probability of nontarget speaker
'Cmiss': 1, # Cost of tandem system falsely rejecting target speaker
'Cfa': 10, # Cost of tandem system falsely accepting nontarget speaker
'Cfa_spoof': 10, # Cost of tandem system falsely accepting spoof
}
# read provided ASV scores
if tar_asv_scores is None or non_asv_scores is None or \
spoof_asv_scores is None:
file_name = os.path.dirname(__file__)+ \
'/data/asvspoof2019/ASVspoof2019.LA.asv.eval.gi.trl.scores.bin'
data = nii_io.f_read_raw_mat(file_name, 2)
tar_asv_scores = data[data[:, 1] == 2, 0]
non_asv_scores = data[data[:, 1] == 1, 0]
spoof_asv_scores = data[data[:, 1] == 0, 0]
eer_asv, asv_threshold = compute_eer(tar_asv_scores, non_asv_scores)
eer_cm, eer_threshold = compute_eer(bonafide_cm_scores, spoof_cm_scores)
[Pfa_asv,Pmiss_asv,Pmiss_spoof_asv,Pfa_spoof_asv] = obtain_asv_error_rates(
tar_asv_scores, non_asv_scores, spoof_asv_scores, asv_threshold)
if flag_legacy:
tDCF_curve, CM_thresholds = compute_tDCF_legacy(
bonafide_cm_scores, spoof_cm_scores,
Pfa_asv, Pmiss_asv, Pmiss_spoof_asv, cost_model, flag_verbose)
else:
tDCF_curve, CM_thresholds = compute_tDCF(
bonafide_cm_scores, spoof_cm_scores,
Pfa_asv, Pmiss_asv, Pfa_spoof_asv, cost_model, flag_verbose)
min_tDCF_index = np.argmin(tDCF_curve)
min_tDCF = tDCF_curve[min_tDCF_index]
return min_tDCF, eer_cm, eer_threshold
def tDCF_wrapper2(bonafide_score_cm, spoof_score_cm, C0, C1, C2):
""" mintDCF, eer = tDCF_wrapper2(bonafide_score_cm,
spoof_score_cm, C0, C1, C2)
compute_tDCF can be factorized into two parts:
C012 computation and min t-DCF computation.
This is for min t-DCF computation, given the values of C012
input
-----
bonafide_score_cm np.array, score of bonafide data
spoof_score_cm np.array, score of spoofed data
C0 scalar, coefficient for min tDCF computation
C1 scalar, coefficient for min tDCF computation
C2 scalar, coefficient for min tDCF computation
output
------
eer scalar, value of EER
mintDCF scalar, value of min tDCF
For C0, C1, C2, see Appendix Eqs.(1-2) in evaluation plan [1],
or Eqs.(10-11) in [2]
References:
[1] T. Kinnunen, H. Delgado, N. Evans,K.-A. Lee, V. Vestman,
A. Nautsch, M. Todisco, X. Wang, M. Sahidullah, J. Yamagishi,
and D.-A. Reynolds, "Tandem Assessment of Spoofing Countermeasures
and Automatic Speaker Verification: Fundamentals," IEEE/ACM Transaction on
Audio, Speech and Language Processing (TASLP).
[2] ASVspoof 2019 challenge evaluation plan
https://www.asvspoof.org/asvspoof2019/asvspoof2019_evaluation_plan.pdf
"""
# Sanity check of scores
combined_scores = np.concatenate((bonafide_score_cm, spoof_score_cm))
if np.isnan(combined_scores).any() or np.isinf(combined_scores).any():
sys.exit('ERROR: Your scores contain nan or inf.')
# Sanity check that inputs are scores and not decisions
n_uniq = np.unique(combined_scores).size
if n_uniq < 3:
sys.exit('ERROR: You should provide soft CM scores - not binary decisions')
# Obtain miss and false alarm rates of CM
Pmiss_cm, Pfa_cm, CM_thresholds = compute_det_curve(
bonafide_score_cm, spoof_score_cm)
# =====
# tDCF
# =====
if np.isnan(C0) or np.isnan(C1) or np.isnan(C2):
        # this is a case where the C coefficients are invalid (NaN);
        # min t-DCF cannot be computed
mintDCF = np.nan
else:
# tDCF values
tDCF = C0 + C1 * Pmiss_cm + C2 * Pfa_cm
# Obtain default t-DCF
tDCF_default = C0 + np.minimum(C1, C2)
# Normalized t-DCF
tDCF_norm = tDCF / tDCF_default
# min t-DCF
mintDCF = tDCF_norm[tDCF_norm.argmin()]
# ====
# EER
# ====
abs_diffs = np.abs(Pmiss_cm - Pfa_cm)
min_index = np.argmin(abs_diffs)
eer = np.mean((Pmiss_cm[min_index], Pfa_cm[min_index]))
return mintDCF, eer
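# Usage note: tDCF_wrapper computes the C coefficients internally from ASV
# scores, while tDCF_wrapper2 expects pre-computed C0, C1, C2 (see the
# evaluation plan referenced above). A minimal sketch with synthetic scores
# and illustrative coefficient values:
#   bona = np.random.randn(1000) + 2.0
#   spoof = np.random.randn(1000) - 2.0
#   mintdcf, eer = tDCF_wrapper2(bona, spoof, C0=0.05, C1=0.5, C2=0.5)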
def ASVspoof2019_evaluate(bonafide_cm_scores, bonafide_cm_file_names,
spoof_cm_scores, spoof_cm_file_names, verbose=False,
protocol_alternative=None):
""" Decompose scores for each attack. For ASVspoof2019
ASVspoof2019_decompose(bonafide_cm_scores, bonafide_cm_file_names,
spoof_cm_scores, spoof_cm_file_names, verbose=False)
input
-----
bonafide_cm_scores: np.array of bonafide scores
bonafide_cm_file_names: file name list corresponding to bonafide_cm_scores
spoof_cm_scores: np.array of spoofed scores (all attack types)
spoof_cm_file_names: file name list corresponding to spoof_cm_scores
verbose: print information from tDCF computation (default: False)
protocol_alternative: alternative protocol to ASVspoof2019 (default: None)
output
------
min_tDCF: np.array of min tDCF for each attack
eer_cm: np.array of EER for each attack
      eer_threshold: np.array of threshold for EER (not min tDCF threshold)
spoof_attack_types: list of attack types
"""
if protocol_alternative is not None:
        # if an alternative protocol is provided, use it.
# this is for protocol tweaking
file_name = protocol_alternative
else:
# official protocol
file_name = os.path.dirname(__file__)+ '/data/asvspoof2019/protocol.txt'
protocol_data = np.genfromtxt(file_name,
dtype=[('spk', 'U10'), ('file', 'U20'),
('misc', 'U5'), ('spoof', 'U5'),
('type','U10')], delimiter=" ")
spoof_type_dic = {protocol_data[x][1]:protocol_data[x][3] for x in \
range(protocol_data.shape[0])}
spoof_attack_types = list(set([x[3] for x in protocol_data]))
spoof_attack_types.sort()
# default set to -1
min_tDCF = np.zeros([len(spoof_attack_types) + 1]) - 1
eer_cm = np.zeros([len(spoof_attack_types) + 1]) - 1
eer_threshold = np.zeros([len(spoof_attack_types) + 1])
# decompose results
decomposed_spoof_scores = []
for idx, spoof_attack_type in enumerate(spoof_attack_types):
tmp_spoof_scores = [spoof_cm_scores[x] for x, y in \
enumerate(spoof_cm_file_names) \
if spoof_type_dic[y] == spoof_attack_type]
tmp_spoof_scores = np.array(tmp_spoof_scores)
decomposed_spoof_scores.append(tmp_spoof_scores.copy())
if len(tmp_spoof_scores):
x1, x2, x3 = tDCF_wrapper(bonafide_cm_scores, tmp_spoof_scores)
min_tDCF[idx] = x1
eer_cm[idx] = x2
eer_threshold[idx] = x3
# pooled results
x1, x2, x3 = tDCF_wrapper(bonafide_cm_scores, spoof_cm_scores)
min_tDCF[-1] = x1
eer_cm[-1] = x2
eer_threshold[-1] = x3
spoof_attack_types.append("pooled")
decomposed_spoof_scores.append(spoof_cm_scores)
for idx in range(len(spoof_attack_types)):
if verbose and eer_cm[idx] > -1:
print("{:s}\tmin-tDCF: {:2.5f}\tEER: {:2.3f}%\t Thre:{:f}".format(
spoof_attack_types[idx], min_tDCF[idx], eer_cm[idx] * 100,
eer_threshold[idx]))
decomposed_spoof_scores = [decomposed_spoof_scores[x] \
for x, y in enumerate(min_tDCF) if y > -1]
spoof_attack_types = [spoof_attack_types[x] \
for x, y in enumerate(min_tDCF) if y > -1]
eer_threshold = [eer_threshold[x] \
for x, y in enumerate(min_tDCF) if y > -1]
eer_cm = [eer_cm[x] for x, y in enumerate(min_tDCF) if y > -1]
min_tDCF = [y for x, y in enumerate(min_tDCF) if y > -1]
return min_tDCF, eer_cm, eer_threshold, spoof_attack_types, \
decomposed_spoof_scores
##############
# for Pytorch models in this project
##############
def parse_pytorch_output_txt(score_file_path):
""" parse_pytorch_output_txt(file_path)
parse the score files generated by the pytorch models
input
-----
file_path: path to the log file
output
------
bonafide: np.array, bonafide scores
bonafide_names: list of file names corresponding to bonafide scores
spoofed: np.array, spoofed scores
spoofed_names: list of file names corresponding to spoofed scores
"""
bonafide = []
spoofed = []
bonafide_names = []
spoofed_names = []
with open(score_file_path, 'r') as file_ptr:
for line in file_ptr:
if line.startswith('Output,'):
temp = line.split(',')
flag = int(temp[2])
if np.isnan(float(temp[3])):
print(line)
continue
if flag:
bonafide.append(float(temp[3]))
bonafide_names.append(temp[1].strip())
else:
spoofed.append(float(temp[3]))
spoofed_names.append(temp[1].strip())
bonafide = np.array(bonafide)
spoofed = np.array(spoofed)
return bonafide, bonafide_names, spoofed, spoofed_names
def ASVspoof2019_decomposed_results(score_file_path, flag_return_results=False,
flag_verbose=True):
""" Get the results from input score log file
ASVspoof2019_decomposed_results(score_file_path, flag_return_results=False,
flag_verbose=True)
input
-----
score_file_path: path to the score file produced by the Pytorch code
flag_return_results: whether return the results (default False)
flag_verbose: print EERs and mintDCFs for each attack (default True)
output
------
if flag_return_results is True:
mintDCFs: list of min tDCF, for each attack
eers: list of EER, for each attack
cm_thres: list of threshold for EER, for each attack
spoof_types: list of spoof attack types
spoof_scores: list of spoof file scores (np.array)
bona: bonafide score
"""
bona, b_names, spoofed, s_names = parse_pytorch_output_txt(score_file_path)
mintDCFs, eers, cm_thres, spoof_types, spoof_scores = ASVspoof2019_evaluate(
bona, b_names, spoofed, s_names, flag_verbose)
if flag_return_results:
return mintDCFs, eers, cm_thres, spoof_types, spoof_scores, bona
else:
return
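# Usage sketch (the score log file name below is hypothetical):
#   ASVspoof2019_decomposed_results('log_eval_score.txt', flag_verbose=True)
# prints pooled and per-attack EER / min t-DCF parsed from the score log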
def ASVspoofNNN_decomposed_results(score_file_path,
flag_return_results=False,
flag_verbose=True,
protocol_alternative=None):
""" Similar to ASVspoof2019_decomposed_results, but use alternative protocol
"""
bona, b_names, spoofed, s_names = parse_pytorch_output_txt(score_file_path)
mintDCFs, eers, cm_thres, spoof_types, spoof_scores = ASVspoof2019_evaluate(
bona, b_names, spoofed, s_names, flag_verbose, protocol_alternative)
if flag_return_results:
return mintDCFs, eers, cm_thres, spoof_types, spoof_scores, bona
else:
return
##############
# for testing using ./data/cm_dev.txt and asv_dev.txt
##############
def read_asv_txt_file(file_path):
data = np.genfromtxt(
file_path, dtype=[('class', 'U10'),('type', 'U10'),
('score','f4')], delimiter=" ")
data_new = np.zeros([data.shape[0], 2])
for idx, data_entry in enumerate(data):
data_new[idx, 0] = data_entry[-1]
if data_entry[1] == 'target':
data_new[idx, 1] = 2
elif data_entry[1] == 'nontarget':
data_new[idx, 1] = 1
else:
data_new[idx, 1] = 0
return data_new
def read_cm_txt_file(file_path):
data = np.genfromtxt(
file_path, dtype=[('class', 'U10'),('type', 'U10'),
('flag', 'U10'),
('score','f4')], delimiter=" ")
data_new = np.zeros([data.shape[0], 2])
for idx, data_entry in enumerate(data):
data_new[idx, 0] = data_entry[-1]
if data_entry[-2] == 'bonafide':
data_new[idx, 1] = 1
else:
data_new[idx, 1] = 0
return data_new
if __name__ == "__main__":
    # these two files are deleted from the repo;
    # for usage of the evaluation tool, please check ../tutorials/README.md
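    # self-contained sanity check with synthetic scores (illustrative only);
    # well-separated bonafide/spoof scores should give a small EER
    demo_rng = np.random.default_rng(1)
    demo_eer, demo_thr = compute_eer(demo_rng.normal(2.0, 1.0, 1000),
                                     demo_rng.normal(-2.0, 1.0, 1000))
    print("Synthetic demo EER: {:f} (threshold {:f})".format(demo_eer, demo_thr))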
asv_scores = read_asv_txt_file('./data/asvspoof2019/asv_dev.txt')
cm_scores = read_cm_txt_file('./data/asvspoof2019/cm_dev.txt')
tar_asv = asv_scores[asv_scores[:, 1]==2, 0]
non_asv = asv_scores[asv_scores[:, 1]==1, 0]
spoof_asv = asv_scores[asv_scores[:, 1]==0, 0]
bona_cm = cm_scores[cm_scores[:, 1]==1, 0]
spoof_cm = cm_scores[cm_scores[:, 1]==0, 0]
mintdcf, eer, eer_threshold = tDCF_wrapper(
bona_cm, spoof_cm, tar_asv, non_asv, spoof_asv)
print("min tDCF: {:f}".format(mintdcf))
print("EER: {:f}%".format(eer*100))
| 37,875 | 41.461883 | 167 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_dist.py | ##!/usr/bin/env python
"""
Module definition for distributions
Definition of distributions for generative models.
Each module should define two methods: forward and inference.
1. forward(input_feat, target): computes distribution given input_feat and
likelihood given target_data
2. inference(input_feat): computes distribution given input_feat and draw sample
Note that Modules defined in core_modules/*.py are for discriminative models.
There is no method for inference. But they may be combined with this code
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
class DistCategorical(torch_nn.Module):
"""Output layer that implements categorical distribution
This Module implements two methods: forward and inference.
forward(input_feat, target): computes the categorical
distribution given input_feat and likelihood given target_data
inference(input_feat): computes the categorical
distribution given input_feat and generate output
Input_feat is the logits before softmax. It will be converted
    into a probability vector inside this Module.
    In other words, input_feat does not need to be a probability vector.
Example:
dim = 4
logits = torch.rand([2, 3, dim])
logits[0, 1, 0] += 9.9
logits[0, 2, 1] += 9.9
logits[0, 0, 2] += 9.9
logits[1, 1, 1] += 9.9
logits[1, 2, 2] += 9.9
logits[1, 0, 0] += 9.9
target = torch.tensor([[[2], [0], [1]], [[0], [1], [2]]])
l_cat = DistCategorical(dim)
samples = l_cat.inference(logits)
print(logits)
print(samples)
loss = l_cat.forward(logits, target)
print(loss)
"""
def __init__(self, category_size):
""" DistCategorical(category_size)
Args
----
category_size: int, number of category
"""
super(DistCategorical, self).__init__()
self.category_size = category_size
self.loss = torch_nn.CrossEntropyLoss()
def _check_input(self, input_feat):
""" check whether input feature vector has the correct dimension
        torch.distributions does not check; it will give output no matter
        what the shape of input_feat is
"""
if input_feat.shape[-1] != self.category_size:
mes = "block_dist.DistCategorical expects input_feat with "
mes += "last dimension of size {:d}. ".format(self.category_size)
mes += "But receives {:d}".format(input_feat.shape[-1])
raise Exception(mes)
return True
def forward(self, input_feat, target):
""" likelihood = forward(input_feat, target)
input
-----
input_feat: tensor (batchsize, length, categorize_size)
tensor to be converted into categorical distribution
target: (batchsize, length, dim=1)
tensor to be used to evaluate the likelihood
output
------
          likelihood: tensor scalar
Note that input_feat is the logits, it will be converted to prob. vec
through LogSoftmax, Then NLLoss is computed.
In practice, we directly use this API that will do the two steps
https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html
"""
self._check_input(input_feat)
        # transpose input_feat to (batchsize, categorical_size, length)
# squeeze target to (batchsize, length)
return self.loss(input_feat.transpose(1, 2), target.squeeze(-1))
def inference(self, input_feat):
""" sample = inference(input_feat)
input
-----
input_feat: tensor (batchsize, length, categorize_size)
tensor to be converted into categorical distribution
output
------
sample: (batchsize, length, dim=1)
Note that input_feat is the logits, it will be converted to prob. vec
through Softmax in this method
"""
# check
self._check_input(input_feat)
# compute probability
prob_vec = torch_nn_func.softmax(input_feat, dim=2)
# distribution
distrib = torch.distributions.Categorical(prob_vec)
# draw samples and save
sample = torch.zeros(
[input_feat.shape[0], input_feat.shape[1], 1],
dtype=input_feat.dtype, device=input_feat.device)
sample[:, :, 0] = distrib.sample()
return sample
if __name__ == "__main__":
print("Definition of distributions modules")
| 4,823 | 31.16 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_resnet.py | ##!/usr/bin/env python
"""
ResNet model
Modified based on https://github.com/joaomonteirof/e2e_antispoofing
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
from scipy import signal as scipy_signal
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_nn as nii_nn
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##
class PreActBlock(torch_nn.Module):
""" Pre-activation version of the BasicBlock
"""
expansion = 1
def __init__(self, in_planes, planes, stride, *args, **kwargs):
super(PreActBlock, self).__init__()
# input batchnorm
self.bn1 = torch_nn.BatchNorm2d(in_planes)
# conv1
self.conv1 = torch_nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = torch_nn.BatchNorm2d(planes)
self.conv2 = torch_nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = torch_nn.Sequential(
torch_nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = torch_nn_func.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(torch_nn_func.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(torch_nn.Module):
""" Pre-activation version of the original Bottleneck module.
"""
expansion = 4
def __init__(self, in_planes, planes, stride, *args, **kwargs):
super(PreActBottleneck, self).__init__()
#
self.bn1 = torch_nn.BatchNorm2d(in_planes)
self.conv1 = torch_nn.Conv2d(
in_planes, planes, kernel_size=1, bias=False)
self.bn2 = torch_nn.BatchNorm2d(planes)
self.conv2 = torch_nn.Conv2d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = torch_nn.BatchNorm2d(planes)
self.conv3 = torch_nn.Conv2d(
planes, self.expansion * planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = torch_nn.Sequential(
torch_nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False))
def forward(self, x):
out = torch_nn_func.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(torch_nn_func.relu(self.bn2(out)))
out = self.conv3(torch_nn_func.relu(self.bn3(out)))
out += shortcut
return out
def conv3x3(in_planes, out_planes, stride=1):
return torch_nn.Conv2d(in_planes, out_planes,
kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
return torch_nn.Conv2d(in_planes, out_planes,
kernel_size=1, stride=stride, bias=False)
RESNET_CONFIGS = {'18': [[2, 2, 2, 2], PreActBlock],
'28': [[3, 4, 6, 3], PreActBlock],
'34': [[3, 4, 6, 3], PreActBlock],
'50': [[3, 4, 6, 3], PreActBottleneck],
'101': [[3, 4, 23, 3], PreActBottleneck]
}
class ResNet(torch_nn.Module):
def __init__(self, enc_dim, resnet_type='18', nclasses=2):
self.in_planes = 16
super(ResNet, self).__init__()
layers, block = RESNET_CONFIGS[resnet_type]
self._norm_layer = torch_nn.BatchNorm2d
        # layer 1
self.conv1 = torch_nn.Conv2d(1, 16, kernel_size=(9, 3),
stride=(3, 1), padding=(1, 1), bias=False)
self.bn1 = torch_nn.BatchNorm2d(16)
self.activation = torch_nn.ReLU()
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.conv5 = torch_nn.Conv2d(
512 * block.expansion, 256, kernel_size=(3, 3),
stride=(1, 1), padding=(0, 1), bias=False)
self.bn5 = torch_nn.BatchNorm2d(256)
self.fc = torch_nn.Linear(256 * 2, enc_dim)
if nclasses >= 2:
self.fc_mu = torch_nn.Linear(enc_dim, nclasses)
else:
self.fc_mu = torch_nn.Linear(enc_dim, 1)
self.initialize_params()
self.attention = nii_nn.SelfWeightedPooling(256)
def initialize_params(self):
for layer in self.modules():
if isinstance(layer, torch.nn.Conv2d):
torch_init.kaiming_normal_(layer.weight, a=0, mode='fan_out')
elif isinstance(layer, torch.nn.Linear):
torch_init.kaiming_uniform_(layer.weight)
elif isinstance(layer, torch.nn.BatchNorm2d) or \
isinstance(layer, torch.nn.BatchNorm1d):
layer.weight.data.fill_(1)
layer.bias.data.zero_()
def _make_layer(self, block, planes, num_blocks, stride=1):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.in_planes != planes * block.expansion:
downsample = torch_nn.Sequential(
conv1x1(self.in_planes, planes * block.expansion, stride),
norm_layer(planes * block.expansion))
layers = []
layers.append(
block(self.in_planes, planes, stride, downsample,
1, 64, 1, norm_layer))
self.in_planes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(self.in_planes, planes, 1, groups=1,
base_width=64, dilation=False, norm_layer=norm_layer))
return torch_nn.Sequential(*layers)
def forward(self, x, without_pooling=False):
x = self.conv1(x)
x = self.activation(self.bn1(x))
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.activation(self.bn5(x)).squeeze(2)
if without_pooling:
return x
else:
stats = self.attention(x.permute(0, 2, 1).contiguous())
feat = self.fc(stats)
mu = self.fc_mu(feat)
return feat, mu
if __name__ == "__main__":
print("Definition of Resnet for anti-spoofing")
| 6,982 | 33.399015 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_glow.py | #!/usr/bin/env python
"""
Building blocks for glow
"""
from __future__ import absolute_import
import os
import sys
import time
import numpy as np
import scipy.linalg
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_nn as nii_nn
import core_scripts.data_io.conf as nii_io_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def sum_over_keep_batch(data):
# (batch, dim1, dim2, ..., ) -> (batch)
# sum over dim1, dim2, ...
sum_dims = [x for x in range(data.ndim)][1:]
return torch.sum(data, dim=sum_dims)
def sum_over_keep_batch2(data, factor):
# (batch, dim1, dim2, ..., ) -> (batch)
    # divide each value by factor and
# sum over dim1, dim2, ...
sum_dims = [x for x in range(data.ndim)][1:]
return torch.sum(data / factor, dim=sum_dims)
class ActNorm(torch_nn.Module):
"""Activation Normalization
Activation normalization layer used in
Kingma, D. P. & Dhariwal, P. Glow
Generative Flow with Invertible 1x1 Convolutions.
arXiv Prepr. arXiv1807.03039 (2018)
For debug:
m_actnorm = ActNorm(5, flag_detjac=True)
data = torch.rand([2, 5, 5])
out, detjac = m_actnorm(data)
data_new = m_actnorm.reverse(out)
print(detjac)
#print(data.mean(dim=[0, 1]))
#print(data.std(dim=[0, 1]))
#print(m_actnorm.m_bias)
#print(m_actnorm.m_scale)
print(torch.sum(torch.log(torch.abs(m_actnorm.m_scale))) * 5 * 2)
print(data - data_new)
"""
def __init__(self, feat_dim, flag_detjac=False):
"""ActNorm(feat_dim, flag_detjac)
Args
----
feat_dim: int, feature dimension (channel for image),
input tensor (batch, ..., feature dimension)
flag_detjac: bool, whether output determinant of jacobian
Note that, it assumes y -> H(.) -> x, where H(.) is ActNorm.forward,
it then returns |det(dH(y)/dy)|
"""
super(ActNorm, self).__init__()
# flag
# whether return det of jacobian matrix
self.flag_detjac = flag_detjac
#
self.feat_dim = feat_dim
# parameter
self.m_scale = torch_nn.Parameter(torch.ones(feat_dim),
requires_grad=True)
self.m_bias = torch_nn.Parameter(torch.zeros(feat_dim),
requires_grad=True)
# flag to prevent re-initialization of the scale and bias
self.m_init_flag = torch_nn.Parameter(torch.zeros(1),
requires_grad=False)
return
def _log(self, x):
# add a floor
#return torch.log(x + torch.finfo(x.dtype).eps)
return torch.log(x)
def _detjac(self, factor=1):
"""
"""
# \sum log |s|, this same value is used for all data
# in this mini-batch, no need to duplicate to (batch,)
return torch.sum(self._log(torch.abs(self.m_scale)) / factor)
def _detjac_size_factor(self, y):
""" h * w * detjac
we need to compute h * w
"""
with torch.no_grad():
# tensor in shape (batch, d1, d2, ... feat_dim)
# then the factor will be d1 x d2 ...
data_size = torch.tensor(y.shape[1:-1])
data_factor = torch.prod(data_size)
return data_factor
def _init_scale_m(self, y):
""" initialize scale and bias for transformation
"""
with torch.no_grad():
# (batch, ... ,feat_dim) -> (-1, feat_dim)
tmp_y = y.view(-1, self.feat_dim)
# get mean and std per feat_dim
m = torch.mean(tmp_y, dim=0)
std = torch.std(tmp_y, dim=0) + 1e-6
# because the transform is (y + bias) * scale
# save scale = 1/std and bias = -m
self.m_scale.data = 1 / std
self.m_bias.data = -1 * m
# prevent further initialization
self.m_init_flag += 1
return
def forward(self, y, factor=1):
"""x = ActNorm.forward(y)
input
-----
y: tensor, (batch, dim1, ..., feat_dim)
output
------
x: tensor, (batch, dim1, ..., feat_dim)
if self.flag_detjac, also returns log_detjac (scalar)
"""
# do initialization for the 1st time
if self.m_init_flag.item() < 1:
self._init_scale_m(y)
# in initial stage, this is equivalent to (y - m)/std
x = (y + self.m_bias) * self.m_scale
if self.flag_detjac:
log_detjac = self._detjac(factor) * self._detjac_size_factor(y)
return x, log_detjac
else:
return x
def reverse(self, x):
"""y = ActNorm.reverse(x)
input
-----
x: tensor, (batch, dim1, ..., feat_dim)
output
------
y: tensor, (batch, dim1, ..., feat_dim)
"""
return x / self.m_scale - self.m_bias
class InvertibleTrans(torch_nn.Module):
"""InvertibleTransformation
Invertible transformation layer used in
Kingma, D. P. & Dhariwal, P. Glow
Generative Flow with Invertible 1x1 Convolutions.
arXiv Prepr. arXiv1807.03039 (2018)
1x1 convolution is implemented using torch.matmul
Example:
feat_dim = 5
m_trans = InvertibleTrans(feat_dim, flag_detjac=True)
data = torch.rand([2, feat_dim, feat_dim])
out, detjac = m_trans(data)
data_new = m_trans.reverse(out)
print(data_new - data)
print(detjac)
"""
def __init__(self, feat_dim, flag_detjac=False):
"""InvertibleTrans(feat_dim, flag_detjac)
Args
----
feat_dim: int, feature dimension (channel for image),
input tensor (batch, ..., feature dimension)
flag_detjac: bool, whether output determinant of jacobian
It assumes y -> H(.) -> x, where H(.) is InvertibleTrans.forward,
it then returns |det(dH(y)/dy)|
"""
super(InvertibleTrans, self).__init__()
#
self.feat_dim = feat_dim
# create initial permutation, lower, and upper triangle matrices
seed_mat = np.random.randn(feat_dim, feat_dim)
# qr decomposition, rotation_mat is a unitary matrix
rotation_mat, _ = scipy.linalg.qr(seed_mat)
# LU decomposition
permute_mat, lower_mat, upper_mat = scipy.linalg.lu(rotation_mat)
# mask matrix (with zero on the diagonal line)
u_mask = np.triu(np.ones_like(seed_mat), k=1)
d_mask = u_mask.T
        # permutation matrix, fixed
self.m_permute_mat = torch_nn.Parameter(
torch.tensor(permute_mat.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=False)
# Lower triangle matrix, trainable
self.m_lower_tria = torch_nn.Parameter(
torch.tensor(lower_mat.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=True)
        # Upper triangle matrix, trainable
self.m_upper_tria = torch_nn.Parameter(
torch.tensor(upper_mat.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=True)
# diagonal line
tmp_diag_line = torch.tensor(
upper_mat.diagonal().copy(),dtype=nii_io_conf.d_dtype)
# use log(|s|)
self.m_log_abs_diag = torch_nn.Parameter(
torch.log(torch.abs(tmp_diag_line)), requires_grad=True)
# save the sign of s as fixed parameter
self.m_diag_sign = torch_nn.Parameter(
torch.sign(tmp_diag_line), requires_grad=False)
        # mask and all-1 diagonal line
self.m_l_mask = torch_nn.Parameter(
torch.tensor(d_mask.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=False)
self.m_u_mask = torch_nn.Parameter(
torch.tensor(u_mask.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=False)
self.m_eye = torch_nn.Parameter(
torch.eye(self.feat_dim, dtype=nii_io_conf.d_dtype),
requires_grad=False)
# buffer for inverse matrix
self.flag_invered = False
self.m_inver = torch_nn.Parameter(
torch.tensor(permute_mat.copy(), dtype=nii_io_conf.d_dtype),
requires_grad=False)
#
self.flag_detjac = flag_detjac
return
def _inverse(self):
""" inverse of the transformation matrix
"""
return torch.inverse(self._compose_mat())
def _compose_mat(self):
""" compose the transformation matrix
W = P L (U + sign * exp( log|s|))
"""
# U + sign * exp(log|s|)
tmp_u = torch.diag(self.m_diag_sign * torch.exp(self.m_log_abs_diag))
tmp_u = tmp_u + self.m_upper_tria * self.m_u_mask
# L
tmp_l = self.m_lower_tria * self.m_l_mask + self.m_eye
return torch.matmul(self.m_permute_mat, torch.matmul(tmp_l, tmp_u))
def _log(self, x):
# add a floor
#return torch.log(x + torch.finfo(x.dtype).eps)
return torch.log(x)
def _detjac(self, factor=1):
"""
"""
# \sum log|s|
# no need to duplicate to each data in the batch
# they all use the same detjac
return torch.sum(self.m_log_abs_diag / factor)
def _detjac_size_factor(self, y):
with torch.no_grad():
# tensor in shape (batch, d1, d2, ... feat_dim)
# then the factor will be d1 x d2 ...
data_size = torch.tensor(y.shape[1:-1])
data_factor = torch.prod(data_size)
return data_factor
def forward(self, y, factor=1):
        # compute y W (matrix product along the last dimension)
        # in other implementations, this is done with a 1x1 conv2d;
        # to be consistent, we can use .T to transpose the matrix first
if self.flag_detjac:
detjac = self._detjac(factor) * self._detjac_size_factor(y)
return torch.matmul(y, self._compose_mat()), detjac
else:
            return torch.matmul(y, self._compose_mat())
def reverse(self, x):
if self.training:
# if it is for training, compute inverse everytime
self.m_inver.data = self._inverse().clone()
else:
# during inference, only do this once
if self.flag_invered is False:
self.m_inver.data = self._inverse().clone()
# only compute inverse matrix once
self.flag_invered = True
return torch.matmul(x, self.m_inver)
class ZeroInitConv2dForGlow(torch_nn.Module):
"""ZeroIniConv2dForGlow
Last Conv2d layer of Glow uses zero-initialized conv2d
This is only used for images
"""
def __init__(self, in_feat_dim, out_feat_dim, kernel_size=3, padding=1):
super().__init__()
# conv
self.m_conv = torch_nn.Conv2d(in_feat_dim, out_feat_dim,
kernel_size, padding=0)
self.m_conv.weight.data.zero_()
self.m_conv.bias.data.zero_()
# scale parameter, following https://github.com/rosinality/glow-pytorch/
self.m_scale = torch_nn.Parameter(
torch.zeros(out_feat_dim, dtype=nii_io_conf.d_dtype))
#
self.m_pad_size = padding
return
def _zerobias(self):
self.m_conv.bias.data.zero_()
return
def _normal_weight(self):
self.m_conv.weight.data.normal_(0, 0.05)
return
def forward(self, x):
p = self.m_pad_size
# pad
y = torch_nn_func.pad(x.permute(0, 3, 1, 2), [p,p,p,p], value=1)
# conv
y = self.m_conv(y).permute(0, 2, 3, 1).contiguous()
# scale parameter, following https://github.com/rosinality/glow-pytorch/
return y * torch.exp(self.m_scale * 3)
class Conv2dForGlow(torch_nn.Module):
"""Conv2dForGlow
    Other Conv2d layers of Glow use this plain (non-zero-initialized) conv2d
This is only used for images
"""
def __init__(self, in_feat_dim, out_feat_dim, kernel_size=3, padding=1):
super().__init__()
self.m_conv = torch_nn.Conv2d(in_feat_dim, out_feat_dim,
kernel_size, padding=padding)
return
def _zerobias(self):
self.m_conv.bias.data.zero_()
return
def _normal_weight(self):
self.m_conv.weight.data.normal_(0, 0.05)
return
def forward(self, x):
return self.m_conv(x.permute(0, 3, 1, 2)).permute(0,2,3,1).contiguous()
class AffineCouplingGlow(torch_nn.Module):
"""AffineCouplingGlow
AffineCoupling block in Glow
Example:
m_affine = AffineCouplingGlow(10, 32, flag_affine=False,flag_detjac=True)
data = torch.randn([2, 4, 4, 10])
data_out, detjac = m_affine(data)
data_inv = m_affine.reverse(data_out)
print(data_inv - data)
print(detjac)
"""
def __init__(self, feat_dim, conv_out_dim=512,
flag_affine=True, flag_detjac=False):
"""AffineCouplingGlow(feat_dim, conv_out_dim=512,
flag_affine=True, flag_detjac=False)
Args:
-----
feat_dim: int, dimension of input feature (channel number of image)
feat_dim must be an even number
conv_out_dim: int, dimension of output feature of the intermediate
conv layer, default 512
flag_affine: bool, whether use affine or additive transformation?
default True
flag_detjac: bool, whether return the determinant of Jacobian,
default False
It assumes that y -> H(.) -> x, where H(.) is AffineCouplingGlow.forward
        When flag_affine == True, H(y) = concatenate([y1, (y_2 + b) \odot s])
        When flag_affine == False, H(y) = concatenate([y1, y_2 + b])
        where [s, b] = NN(y1)
"""
super(AffineCouplingGlow, self).__init__()
self.flag_affine = flag_affine
self.flag_detjac = flag_detjac
if feat_dim % 2 > 0:
print("AffineCoulingGlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
if self.flag_affine:
self.m_nn_outdim = feat_dim
else:
self.m_nn_outdim = feat_dim//2
# create network
self.m_conv = torch_nn.Sequential(
Conv2dForGlow(feat_dim//2, conv_out_dim, kernel_size=3, padding=1),
torch_nn.ReLU(),
Conv2dForGlow(conv_out_dim, conv_out_dim, kernel_size=1, padding=0),
torch_nn.ReLU(),
ZeroInitConv2dForGlow(conv_out_dim, self.m_nn_outdim,
kernel_size=3, padding=1)
)
# no bias, normal initial weight
self.m_conv[0]._zerobias()
self.m_conv[0]._normal_weight()
self.m_conv[2]._zerobias()
self.m_conv[2]._normal_weight()
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return sum_over_keep_batch(log_scale/factor)
def _nn_trans(self, y1):
if self.flag_affine:
log_scale, bias = self.m_conv(y1).chunk(2, -1)
# follow openai implementation
scale = torch.sigmoid(log_scale + 2)
log_scale = torch.log(scale)
else:
bias = self.m_conv(y1)
scale = torch.ones_like(y1)
log_scale = torch.zeros_like(y1)
return scale, bias, log_scale
def forward(self, y, factor=1):
"""AffineCoulingGlow(y)
input
-----
y: tensor, (batch, dim1, dim2, ..., feat_dim)
output
------
out: tensor, (batch, dim1, dim2, ..., feat_dim)
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x):
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1)
y2 = x2 / scale - bias
#
return torch.cat([y1, y2], dim=-1)
class SqueezeForGlow(torch_nn.Module):
"""SqueezeForGlow
Squeeze layer for Glow
See doc of __init__ for different operation modes
Example:
data = torch.randn([2, 4, 4, 3])
m_squeeze = SqueezeForGlow()
data_squeezed = m_squeeze(data)
data_unsqu = m_squeeze.reverse(data_squeezed)
print(data)
print(data_squeezed)
print(torch.std(data_unsqu - data))
print(data[0, :, :, 0])
print(data_squeezed[0, :, :, 0])
print(data_squeezed[0, :, :, 1])
print(data_squeezed[0, :, :, 2])
print(data_squeezed[0, :, :, 3])
"""
def __init__(self, mode = 1):
"""SqueezeForGlow(mode=1)
Args
----
mode: int, 1: for image
2: for audio
mode == 1:
(batch, height, width, channel)->(batch, height/2, width/2, channel*4)
"""
super(SqueezeForGlow, self).__init__()
self.m_mode = mode
return
def get_squeeze_factor(self):
if self.m_mode == 1:
# for image, the channel number will be compressed by 4
return 4
def forward(self, x):
"""
"""
if self.m_mode == 1:
            # assume (batch, height, width, channel)
if len(x.shape) != 4:
print("SqueezeForGlow(mode=1)")
print(", input should be (batch, height, weight, channel)")
sys.exit(1)
batch, height, width, channel = x.shape
            # (batch, height//2, 2, width//2, 2, channel)
            x_squeezed = x.view(batch, height // 2, 2, width // 2, 2, channel)
            # (batch, height//2, width//2, channel * 2 * 2)
            x_squeezed = x_squeezed.permute(0, 1, 3, 5, 2, 4).contiguous()
            x_squeezed = x_squeezed.view(batch, height//2, width//2, channel*4)
        else:
            print("SqueezeForGlow not implemented")
            sys.exit(1)
        return x_squeezed
def reverse(self, x_squeezed):
if self.m_mode == 1:
            # assume (batch, height, width, channel)
if len(x_squeezed.shape) != 4:
print("SqueezeForGlow(mode=1)")
print(", input should be (batch, height, weight, channel)")
sys.exit(1)
batch, height, width, channel = x_squeezed.shape
x = x_squeezed.view(batch, height, width, channel // 4, 2, 2)
# (batch, height * 2, width * 2, channel)
x = x.permute(0, 1, 4, 2, 5, 3).contiguous()
x = x.view(batch, height*2, width*2, channel//4)
        else:
            print("SqueezeForGlow not implemented")
            sys.exit(1)
        return x
class PriorTransform(torch_nn.Module):
"""Prior transformation at the end of each Glow block
    This is not described in the paper but is implemented in the official code.
    https://github.com/rosinality/glow-pytorch/issues/11
    This is a wrapper around the split operation. However, an additional
    affine transformation is included.
Given y,
If flag_split == True:
x, z_1 <- y.split()
z_0 <- (z_1 - f_bias(x)) / f_scale(x)
    In a naive implementation, we could directly evaluate N(z_1; 0, I).
However, this block further converts z_1 -> z_0
If flag_split == False:
if flag_final_block == True:
z_1 <- y
z_0 <- (z_1 - f_bias(0)) / f_scale(0), final latent
x <- None , no input for the next Glowblock
else
x <- y , which is used for the next Glowblock
x <- (x - f_bias(0)) / f_scale(0), input to the next GlowBlock
z_0 <- None , no split output
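    Example (a minimal sketch; shapes below are illustrative):
        m_prior = PriorTransform(8, flag_split=True, flag_final_block=False)
        data = torch.randn([2, 4, 4, 8])
        x, z_0, _ = m_prior(data)
        data_rec = m_prior.reverse(x, z_0)
        print(torch.std(data_rec - data)) # expected to be close to 0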
"""
def __init__(self, feat_dim, flag_split, flag_final_block):
"""PriorTransform(feat_dim)
Args
----
feat_dim: int, feature dimension or channel number
input tensor should be (batch, dim1, dim2, ..., feat_dim)
image should be (batch, height, weight, feat_dim)
flag_split: bool, split or not split
flag_final_block: bool, whether this is the for the final block
"""
super(PriorTransform, self).__init__()
self.flag_split = flag_split
if flag_split:
self.m_nn = ZeroInitConv2dForGlow(feat_dim // 2, feat_dim)
else:
self.m_nn = ZeroInitConv2dForGlow(feat_dim, feat_dim * 2)
self.flag_final_block = flag_final_block
if flag_final_block and flag_split:
print("PriorTransform flag_split and flag_final_block are True")
print("This is unexpected. please check model definition")
sys.exit(1)
return
def _detjac(self, log_scale, factor=1):
# log|\prod 1/exp(log_scale)| = -\sum log_scale
# note that we should return a tensor (batch,)
return sum_over_keep_batch(-1 * log_scale / factor)
def forward(self, y, factor=1):
"""PriorTransform(y)
y -> H() -> [x, z_0]
input
-----
y: (batch, dim1, ..., feat_dim)
output
------
x: tensor or None, input to the next GlowBlock
z_0: tensor or None, latent variable for evaluating N(z_0; 0, I)
log_detjac: scalar
Note that
If self.flag_split==True, x, z_0 will (batch, dim1, ..., feat_dim//2)
If self.flag_split==False and self.flag_final_block==True:
x = None, which indicates no input for the next GlowBlock
z_0, (batch, dim1, ..., feat_dim)
If self.flag_split==False and self.flag_final_block==False:
z_0 = None, which indicates no latent output from this GlowBlock
x, (batch, dim1, ..., feat_dim), input to the next GlowBlock
"""
if not self.flag_split:
zeros = torch.zeros_like(y)
z_mean, z_log_std = self.m_nn(zeros).chunk(2, -1)
if self.flag_final_block:
# For z_1 <- y
# z_0 <- (z_1 - f_bias(zero)) / f_scale(zero)
# x <- None
z_0 = (y - z_mean) / torch.exp(z_log_std)
x = None
else:
# z_0 <- None
# x <- (z_1 - f_bias(zero)) / f_scale(zero)
z_0 = None
x = (y - z_mean) / torch.exp(z_log_std)
else:
# For x, z_1 <- y.split()
# z_0 <- (z_1 - f_bias(x)) / f_scale(x)
x, z_1 = y.chunk(2, -1)
z_mean, z_log_std = self.m_nn(x).chunk(2, -1)
z_0 = (z_1 - z_mean) / torch.exp(z_log_std)
return x, z_0, self._detjac(z_log_std, factor)
def reverse(self, x, z_out):
"""PriorTransform(y)
y <- H() <- x, z_0
input
-----
x: tensor or None
z_0: tensor or None
output
------
y: (batch, dim1, ..., feat_dim)
Note that
If self.flag_split==True
x, z_out should be (batch, dim1, ..., feat_dim//2)
If self.flag_split==False and self.flag_final_block==True:
            x = None, which indicates no input from the following GlowBlock
z_0, (batch, dim1, ..., feat_dim)
If self.flag_split==False and self.flag_final_block==False:
            z_0 = None, which indicates no additional latent from this GlowBlock
x, (batch, dim1, ..., feat_dim), input from the following GlowBlock
"""
if self.flag_split:
if x is not None:
z_mean, z_log_std = self.m_nn(x).chunk(2, -1)
z_tmp = z_out * torch.exp(z_log_std) + z_mean
y_tmp = torch.cat([x, z_tmp], -1)
else:
print("PriorTransform.reverse receives None")
sys.exit(1)
else:
if self.flag_final_block:
zeros = torch.zeros_like(z_out)
z_mean, z_log_std = self.m_nn(zeros).chunk(2, -1)
y_tmp = z_out * torch.exp(z_log_std) + z_mean
else:
zeros = torch.zeros_like(x)
z_mean, z_log_std = self.m_nn(zeros).chunk(2, -1)
y_tmp = x * torch.exp(z_log_std) + z_mean
return y_tmp
class FlowstepGlow(torch_nn.Module):
"""FlowstepGlow
One flow step in Glow
"""
def __init__(self, feat_dim, flag_affine=True, conv_coup_dim=512):
"""FlowstepGlow(feat_dim, flag_affine=True)
Args:
-----
feat_dim: int, dimension of input feature (channel number of image)
feat_dim must be an even number
flag_affine: bool, whether use affine or additive transformation in
AffineCouplingGlow layer (see AffineCouplingGlow)
default True.
conv_coup_dim: int, dimension of intermediate cnn layer in coupling
default 512, (see AffineCouplingGlow)
It assumes that y -> H(.) -> x, where H(.) is FlowstepGlow.forward
"""
super(FlowstepGlow, self).__init__()
self.flag_affine = flag_affine
# layers
self.m_actnorm = ActNorm(feat_dim, flag_detjac=True)
self.m_invtrans = InvertibleTrans(feat_dim, flag_detjac=True)
self.m_coupling = AffineCouplingGlow(feat_dim, conv_coup_dim,
flag_affine, flag_detjac=True)
return
def forward(self, y):
x_tmp, log_tmp1 = self.m_actnorm(y)
x_tmp, log_tmp2 = self.m_invtrans(x_tmp)
x_tmp, log_tmp3 = self.m_coupling(x_tmp)
return x_tmp, log_tmp1 + log_tmp2 + log_tmp3
def reverse(self, x):
# prevent accidental reverse during training
y_tmp = self.m_coupling.reverse(x)
y_tmp = self.m_invtrans.reverse(y_tmp)
y_tmp = self.m_actnorm.reverse(y_tmp)
return y_tmp
class GlowBlock(torch_nn.Module):
"""GlowBlock
One Glow block, squeeze + step_of_flow + (split), Fig2.(b) in original paper
Example:
m_glow = GlowBlock(3, num_flow_step=32)
data = torch.randn([2, 64, 64, 3])
x, z, detjac = m_glow(data)
m_glow.eval()
data_new = m_glow.reverse(x, z)
#print(m_glow.training)
#print(x, z)
print(torch.std(data_new - data))
"""
def __init__(self, feat_dim, num_flow_step=12, conv_coup_dim = 512,
flag_split=True, flag_final_block=False,
flag_affine=True, squeeze_mode=1):
"""GlowBlock(feat_dim, num_flow_step=12, conv_coup_dim = 512,
flag_split=True, flag_affine=True, squeeze_mode=1)
Args
----
feat_dim: int, dimension of input feature (channel number of image)
feat_dim must be an even number
num_flow_step: int, number of flow steps, default 12
conv_coup_dim: int, dimension of intermediate cnn layer in coupling
default 512, (see AffineCouplingGlow)
        flag_split: bool, whether to split the output.
Last GlowBlock uses flag_split=False
default True
flag_final_block: bool, whether this is the final GlowBlock
default False
flag_affine: bool, whether use affine or additive transformation in
AffineCouplingGlow layer (see AffineCouplingGlow)
default True.
squeeze_mode: int, mode for squeeze, default 1 (see SqueezeForGlow)
"""
super(GlowBlock, self).__init__()
# squeeze
self.m_squeeze = SqueezeForGlow(squeeze_mode)
        # number of channels (feature dim) after squeeze
squeezed_feat_dim = feat_dim * self.m_squeeze.get_squeeze_factor()
# steps of flow
self.m_flow_steps = []
for i in range(num_flow_step):
self.m_flow_steps.append(
FlowstepGlow(squeezed_feat_dim, flag_affine, conv_coup_dim))
self.m_flow_steps = torch_nn.ModuleList(self.m_flow_steps)
# prior transform
self.flag_split = flag_split
self.flag_final_block = flag_final_block
if self.flag_final_block and self.flag_split:
print("GlowBlock flag_split and flag_final_block are True")
print("This is unexpected. Please check model definition")
sys.exit(1)
self.m_prior = PriorTransform(
squeezed_feat_dim, self.flag_split, self.flag_final_block)
return
def forward(self, y):
"""x, z, log_detjac = GlowBlock(y)
input
-----
y: tensor, (batch, height, width, channel)
output
------
        x: tensor or None, input to the next GlowBlock,
           (batch, height//2, width//2, channel*2) when flag_split is True
        z: tensor or None, latent noise from this block,
           (batch, height//2, width//2, channel*2) when flag_split is True
        log_detjac: tensor or scalar
        For multi-scale glow, z is the whitened noise
"""
log_detjac = 0
# squeeze
y_suqeezed = self.m_squeeze(y)
# flows
x_tmp = y_suqeezed
for m_flow in self.m_flow_steps:
x_tmp, log_detjac_tmp = m_flow(x_tmp)
log_detjac += log_detjac_tmp
# prior transform
x, z, log_detjac_tmp = self.m_prior(x_tmp)
log_detjac += log_detjac_tmp
# [x, z] should have the same size as input y_suqeezed
return x, z, log_detjac
def reverse(self, x, z):
"""
"""
# prior
x_tmp = self.m_prior.reverse(x, z)
# flow
for m_flow in self.m_flow_steps[::-1]:
x_tmp = m_flow.reverse(x_tmp)
# squeeze
y = self.m_squeeze.reverse(x_tmp)
return y
class Glow(torch_nn.Module):
"""Glow
"""
def __init__(self, feat_dim, flow_step_num=32, flow_block_num=4,
flag_affine=False, conv_coup_dim=512, squeeze_mode=1):
"""Glow(feat_dim, flow_step_num=32, flow_block_num=4,
flag_affine=True, conv_coup_dim=512, squeeze_mode=1)
Args
----
feat_dim: int, dimension of feature, or channel of input image
flow_step_num: int, number of flow steps per block, default 32
flow_block_num: int, number of flow blocks, default 4
        flag_affine: bool, whether use affine or additive transformation
                     default False, see AffineCouplingGlow
conv_coup_dim: int, channel size of intermediate conv layer in
coupling layer NN(). see AffineCouplingLayer
        squeeze_mode: int, mode for squeezing.
                      1 for image. See SqueezeForGlow
"""
super(Glow, self).__init__()
self.m_blocks = []
self.m_flag_splits = []
for i in range(flow_block_num):
# Whether the block uses split or not is completely determined by
# whether this block is the last block or not
# last block does not split output
flag_split = True if i < (flow_block_num - 1) else False
# save this information for generating random noise
self.m_flag_splits.append(flag_split)
# whether this is the final block
flag_final_block = True if i == (flow_block_num - 1) else False
self.m_blocks.append(
GlowBlock(
feat_dim * (2**i), flow_step_num, conv_coup_dim,
flag_split=flag_split, flag_final_block=flag_final_block,
flag_affine=flag_affine,
squeeze_mode=1))
self.m_blocks = torch_nn.ModuleList(self.m_blocks)
return
def _normal_lh(self, noise):
# likelihood of normal distribution on the given noise
return -0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
def forward(self, y):
"""Glow.forward(y)
Conducts y -> H(.) -> z, where z is supposed to be Gaussian noise
input
-----
y: tensor, (batch, dim1, dim2, ..., feat_dim)
for image, (batch, height, width, channel)
output
------
z: list of tensor, random noise from each block
        neg_logp_y: scalar, -log p(y), averaged over batch and pixels
        logp_z: scalar, log N(z; 0, I), averaged over batch and pixels
        logdet: scalar, log |det dH(.)/dy|, averaged over batch and pixels
Because Glow uses multi-scale structure, z will be a list of noise
"""
batch_size = y.shape[0]
        # np.log(2) converts the log-likelihood from nats to bits
        # np.prod([dim for dim in y.shape[1:]]) is the number of data
        #  dimensions (pixels x channels) per sample
factor = np.log(2) * np.prod([dim for dim in y.shape[1:]])
z_bags = []
log_detjac = 0
log_pz = 0
h_tmp = y
for m_block in self.m_blocks:
h_tmp, z_tmp, log_detjac_tmp = m_block(h_tmp)
z_bags.append(z_tmp)
log_detjac += log_detjac_tmp / factor
# keep log_pz for each data in batch (batchsize,)
log_pz += sum_over_keep_batch(self._normal_lh(z_tmp)) / factor
# average over batch and pixels
neg_logp_y = -(log_pz + log_detjac).mean()
return z_bags, neg_logp_y, \
log_pz.mean(), log_detjac.mean()
def reverse(self, z_bags):
""" y = Glow.reverse(z_bags)
input
-----
z_bags: list of tensors
output
------
y: tensor, (batch, dim1, dim2, ..., feat_dim)
The random noise in z_bags should be compatible with the
model. You may use Glow.get_z_noises to retrieve a z_bags
"""
for i, (z, m_block) in enumerate(zip(z_bags[::-1],
self.m_blocks[::-1])):
if i == 0:
# the last block without split
y_tmp = m_block.reverse(None, z)
else:
y_tmp = m_block.reverse(y_tmp, z)
return y_tmp
def get_z_noises(self, image_size, noise_std=0.7, batchsize=16):
"""z_bags = Glow.get_z_noises(image_size, noise_std=0.7, batchsize=16)
Return a list of random noises for random sampling
input
-----
image_size: int, size of the image, assume image is square,
this number just specifies the height / width
noise_std: float, std of Gaussian noise, default 0.7
batchsize: int, batch size of this random data, default 16
output
------
z_bags: list of tensors
Shape of the random noise in z_bags is decided by Glow configuration.
Glow.reverse(z_bags) can be used to produce image from this z_bags
"""
device = next(self.parameters()).device
z_bags = []
tmp_im_size = image_size
tmp_chan = 3
for flag_split in self.m_flag_splits:
if flag_split:
tmp_im_size = tmp_im_size // 2
tmp_chan = tmp_chan * 2
else:
tmp_im_size = tmp_im_size // 2
tmp_chan = tmp_chan * 4
z_tmp = torch.randn([batchsize, tmp_im_size, tmp_im_size, tmp_chan],
dtype=nii_io_conf.d_dtype, device=device)
z_bags.append(z_tmp * noise_std)
return z_bags
if __name__ == "__main__":
print("Definition of Glow and its components")
| 36,862 | 33.809254 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_nsf.py | ##!/usr/bin/env python
"""
Major blocks defined for NSF
These blocks were originally defined in ../project/01_nsf/*/.model.py
Definitions are gathered here for convenience.
CondModule, SourceModule, and FilterModule are not copied here since
they may change according to the model for a certain application
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
from scipy import signal as scipy_signal
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_nn as nii_nn
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#######
# Neural filter block
#######
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
NeuralFilterBlock(signal_size, hidden_size, kernel_size, conv_num=10)
args
----
signal_size: int, input signal is in shape (batch, length, signal_size)
hidden_size: int, output of conv layers is (batch, length, hidden_size)
kernel_size: int, kernel size of the conv layers
conv_num: number of conv layers in this neural filter block (default 10)
    legacy_scale: Bool, whether to load the scale as a parameter or to use
                  a fixed magic number.
                  To be compatible with old models that define self.scale
No impact on the result, just different ways to load a
fixed self.scale
"""
def __init__(self, signal_size, hidden_size, kernel_size=3, conv_num=10,
legacy_scale = False):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_size = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [nii_nn.Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_size]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4,
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size,
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale: to be consistent with CURRENNT implementation
if legacy_scale:
# in case this scale is defined as model parameter in
# some old models
self.scale = torch_nn.Parameter(
torch.tensor([0.1]), requires_grad=False)
else:
# simple hyper-parameter should be OK
self.scale = 0.1
return
def forward(self, signal, context):
"""
input
-----
signal (batchsize, length, signal_size)
context (batchsize, length, hidden_size)
context is produced from the condition module
output
------
output: (batchsize, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
############################
# Source signal generator
############################
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
args
----
    samp_rate: float, sampling rate in Hz
    harmonic_num: int, number of harmonic overtones (default 0, i.e., only F0)
    sine_amp: float, amplitude of sine-waveform (default 0.1)
noise_std: float, std of Gaussian noise (default 0.003)
voiced_threshold: int, F0 threshold for U/V classification (default 0)
F0 < voiced_threshold will be set as unvoiced regions
    flag_for_pulse: Bool, whether this SineGen is used inside PulseGen
(default False)
Note: when flag_for_pulse is True, the first time step of a voiced
    segment is always cos(0), i.e., sin(np.pi/2)
"""
def __init__(self, samp_rate, harmonic_num = 0, sine_amp = 0.1,
noise_std = 0.003, voiced_threshold = 0, flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
return
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
"""
input
-----
f0_values: (batchsize, length_in_time, dim)
        where dim is 1 (fundamental tone) plus the number of harmonic overtones
f0_values are supposed to be up-sampled. In other words, length should
be equal to the number of waveform sampling points.
output
------
        sine_values: (batchsize, length_in_time, dim)
sine_values[i, :, k] is decided by the F0s in f0_values[i, :, k]
"""
        # convert F0 to values in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x *2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
            # voiced segments is cos(0), i.e., sin(pi/2)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
# each voiced segments
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input
-----
F0: tensor, in shape (batchsize, length, dim=1)
up-sampled F0, length should be equal to the waveform length
Input F0 should be discontinuous.
F0 for unvoiced steps should be 0
output
------
sine_tensor: tensor, (batchsize, length, output_dim)
output uv: tensor, (batchsize, length, 1)
noise: tensor, (batchsize, length, 1)
note that output_dim = 1 + harmonic_num
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: for unvoiced regions, the noise std should be similar
            #  to sine_amp, i.e., std = self.sine_amp/3 -> max ~ sine_amp
            #  for voiced regions, the noise std is self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
class PulseGen(torch_nn.Module):
""" Definition of Pulse train generator
There are many ways to implement pulse generator.
    Here, PulseGen is based on SineGen.
This is used in cyclic-noise NSF
"""
def __init__(self, samp_rate, pulse_amp = 0.1,
noise_std = 0.003, voiced_threshold = 0):
super(PulseGen, self).__init__()
self.pulse_amp = pulse_amp
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.noise_std = noise_std
self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0,\
sine_amp=self.pulse_amp, noise_std=0,\
voiced_threshold=self.voiced_threshold,\
flag_for_pulse=True)
def forward(self, f0):
""" Pulse train generator
pulse_train, uv = forward(f0)
input
-----
F0: tensor, (batchsize, length, dim=1)
up-sampled F0
f0 for unvoiced steps should be 0
length should be equal to the expected waveform length
output
------
pulse_train: tensor, (batchsize, length, dim)
sine_wave: tensor, (batchsize, length, dim), sine waveform that
is used to derive the pulse train
uv: tensor, (batchsize, length, 1), u/v flag
pulse_noise: tensor, (batchsize, length, dim), additive noise in
pulse_train
"""
with torch.no_grad():
sine_wav, uv, noise = self.l_sinegen(f0)
# sine without additive noise
pure_sine = sine_wav - noise
# step t corresponds to a pulse if
# sine[t] > sine[t+1] & sine[t] > sine[t-1]
# & sine[t-1], sine[t+1], and sine[t] are voiced
# or
# sine[t] is voiced, sine[t-1] is unvoiced
# we use torch.roll to simulate sine[t+1] and sine[t-1]
sine_1 = torch.roll(pure_sine, shifts=1, dims=1)
uv_1 = torch.roll(uv, shifts=1, dims=1)
uv_1[:, 0, :] = 0
sine_2 = torch.roll(pure_sine, shifts=-1, dims=1)
uv_2 = torch.roll(uv, shifts=-1, dims=1)
uv_2[:, -1, :] = 0
loc = (pure_sine > sine_1) * (pure_sine > sine_2) \
* (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \
+ (uv_1 < 1) * (uv > 0)
# pulse train without noise
pulse_train = pure_sine * loc
# additive noise to pulse train
# note that noise from sinegen is zero in voiced regions
pulse_noise = torch.randn_like(pure_sine) * self.noise_std
# with additive noise on pulse, and unvoiced regions
pulse_train += pulse_noise * loc + pulse_noise * (1 - uv)
return pulse_train, sine_wav, uv, pulse_noise
if __name__ == "__main__":
print("Definition of major components in NSF")
| 13,906 | 37.311295 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/dist.py | #!/usr/bin/env python
"""
dist.py
Utilities for probabilistic distribution
Code adapted from
https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
I haven't used these codes before.
To be moved to core_scripts/math_tools
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
# alias used by the functions below (F.softplus, F.log_softmax, F.one_hot)
import torch.nn.functional as F
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def log_sum_exp(x):
""" Adapted from
https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
numerically stable log_sum_exp implementation that prevents overflow """
# TF ordering
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
def discretized_mix_logistic_loss(y_hat, y, num_classes=65536,
log_scale_min=None, reduce=True):
""" Adapted from
https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py
"""
if log_scale_min is None:
log_scale_min = float(np.log(1e-14))
y_hat = y_hat.permute(0,2,1)
assert y_hat.dim() == 3
assert y_hat.size(1) % 3 == 0
nr_mix = y_hat.size(1) // 3
# (B x T x C)
y_hat = y_hat.transpose(1, 2)
# unpack parameters. (B, T, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix:2 * nr_mix]
log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix],
min=log_scale_min)
# B x T x 1 -> B x T x num_mixtures
y = y.expand_as(means)
centered_y = y - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1. / (num_classes - 1))
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
# equivalent: torch.log(F.sigmoid(plus_in))
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
# equivalent: (1 - F.sigmoid(min_in)).log()
log_one_minus_cdf_min = -F.softplus(min_in)
# probability for all other cases
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in)
# tf equivalent
"""
log_probs = tf.where(x < -0.999, log_cdf_plus,
tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5,
tf.log(tf.maximum(cdf_delta, 1e-12)),
log_pdf_mid - np.log(127.5))))
"""
# TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value
# for num_classes=65536 case? 1e-7? not sure..
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * \
torch.log(torch.clamp(cdf_delta, min=1e-12)) + \
(1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2))
inner_cond = (y > 0.999).float()
inner_out = inner_cond * log_one_minus_cdf_min + \
(1. - inner_cond) * inner_inner_out
cond = (y < -0.999).float()
log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
if reduce:
return -torch.mean(log_sum_exp(log_probs))
else:
return -log_sum_exp(log_probs).unsqueeze(-1)
def sample_from_discretized_mix_logistic(y, log_scale_min=None):
"""
Sample from discretized mixture of logistic distributions
Args:
y (Tensor): B x C x T
log_scale_min (float): Log scale minimum value
Returns:
Tensor: sample in range of [-1, 1].
"""
if log_scale_min is None:
log_scale_min = float(np.log(1e-14))
assert y.size(1) % 3 == 0
nr_mix = y.size(1) // 3
# B x T x C
y = y.transpose(1, 2)
logit_probs = y[:, :, :nr_mix]
# sample mixture indicator from softmax
temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
temp = logit_probs.data - torch.log(- torch.log(temp))
_, argmax = temp.max(dim=-1)
# (B, T) -> (B, T, nr_mix)
one_hot = F.one_hot(argmax, nr_mix).float()
# select logistic parameters
means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1)
log_scales = torch.clamp(torch.sum(
y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
x = torch.clamp(torch.clamp(x, min=-1.), max=1.)
return x
if __name__ == "__main__":
print("dist")
| 5,244 | 32.196203 | 82 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_rawnet.py | #!/usr/bin/env python
"""
This file contains code for RawNet2
Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch,
Nicholas Evans, and Anthony Larcher. End-to-End Anti-Spoofing with RawNet2.
In Proc. ICASSP, 6369--6373. 2020.
Implementation based on RawNet in
https://github.com/asvspoof-challenge/2021/
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
class SincConv2(torch_nn.Module):
"""
"""
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(self, num_filters, kernel_size, in_channels=1,
sample_rate = 16000, num_freq_bin = 257,
stride = 1, dilation = 1,
flag_pad = True, flag_trainable=False):
"""
SincConv2(num_filters, kernel_size, in_channels=1,
sample_rate = 16000, num_freq_bins = 257,
stride = 1, dilation = 1,
flag_pad = True, flag_trainable=False)
Args
----
num_filters: int, number of sinc-filters
kernel_size: int, length of each sinc-filter
in_channels: int, dimension of input signal,
(batchsize, length, in_channels)
sample_rate: int, sampling rate
num_freq_bin: number of frequency bins, not really important
here. Default 257
stride: int, stride of convoluiton, default 1
dilation: int, dilaion of conv, default 1
flag_pad: bool, whether pad the sequence to make input and
output have equal length, default True
flag_trainable: bool, whether the filter is trainable
default False
Num_filters and in_channels decide the output tensor dimension
If input is (batchsize, length, in_channels), output will be
(batchsize, length, in_channels * num_filters)
        This is done through depthwise convolution,
https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
i.e., each input dimension will go through all the num_filters.
"""
super(SincConv2,self).__init__()
self.m_out_channels = num_filters
self.m_in_channels = in_channels
self.m_sample_rate=sample_rate
        # Forcing the filter length to be odd (i.e., perfectly symmetric)
self.m_kernel_size = kernel_size
if kernel_size % 2 == 0:
self.m_kernel_size = self.m_kernel_size + 1
self.m_stride = stride
self.m_dilation = dilation
# Pad to original length
if flag_pad:
self.m_padding = dilation * (self.m_kernel_size - 1) + 1 - stride
if stride % 2 == 0:
print("Warning: padding in SincCov is not perfect because of")
print("stride {:d}".format(stride))
self.m_padding = self.m_padding // 2
else:
self.m_padding = 0
# initialize filterbanks using Mel scale
f = int(self.m_sample_rate / 2) * np.linspace(0, 1, num_freq_bin)
# Hz to mel conversion
fmel = self.to_mel(f)
fmelmax = np.max(fmel)
fmelmin = np.min(fmel)
filbandwidthsmel = np.linspace(fmelmin, fmelmax, self.m_out_channels+1)
# Mel to Hz conversion
filbandwidthsf = self.to_hz(filbandwidthsmel)
# mel band
self.m_mel = filbandwidthsf
# time index
self.m_hsupp = torch.arange(-(self.m_kernel_size-1)/2,
(self.m_kernel_size-1)/2+1)
# filter coeffs
self.m_filters = torch.zeros(self.m_out_channels, self.m_kernel_size)
# create filter coefficient
for i in range(self.m_out_channels):
fmin = self.m_mel[i]
fmax = self.m_mel[i+1]
hHigh = np.sinc(2 * fmax * self.m_hsupp / self.m_sample_rate)
hHigh = (2 * fmax / self.m_sample_rate) * hHigh
hLow = np.sinc(2 * fmin * self.m_hsupp / self.m_sample_rate)
hLow = (2 * fmin / self.m_sample_rate) * hLow
# band pass filters
hideal = hHigh - hLow
# applying windowing
self.m_filters[i,:] = torch.tensor(
np.hamming(self.m_kernel_size) * hideal)
# repeat to (output_channels * in_channels)
self.m_filters = self.m_filters.repeat(self.m_in_channels, 1)
# save as model parameter
self.m_filters = self.m_filters.view(
self.m_out_channels * self.m_in_channels, 1, self.m_kernel_size)
self.m_filters = torch_nn.Parameter(
self.m_filters, requires_grad=flag_trainable)
return
def forward(self,x):
"""SincConv(x)
input
-----
x: tensor, shape (batchsize, length, feat_dim)
output
------
y: tensor, shape (batchsize, length, output-channel)
"""
return torch_nn_func.conv1d(
x.permute(0, 2, 1), self.m_filters, stride=self.m_stride,
padding=self.m_padding, dilation=self.m_dilation,
bias=None, groups=x.shape[-1]).permute(0, 2, 1)
class FMS(torch_nn.Module):
"""filter-wise feature map scaling
Hemlata Tak, Jose Patino, Massimiliano Todisco, Andreas Nautsch,
Nicholas Evans, and Anthony Larcher.
End-to-End Anti-Spoofing with RawNet2.
In Proc. ICASSP, 6369--6373. 2020.
Example:
l_fms = FMS(5)
with torch.no_grad():
data = torch.randn(2, 1000, 5)
out = l_fms(data)
"""
def __init__(self, feat_dim):
"""FMS(feat_dim)
Args
----
feat_dim: int, dimension of input, in shape (batch, length, dim)
"""
super(FMS, self).__init__()
self.m_dim = feat_dim
self.m_pooling = torch_nn.AdaptiveAvgPool1d(1)
self.m_dim_change = torch_nn.Linear(feat_dim, feat_dim)
self.m_act = torch_nn.Sigmoid()
return
def forward(self, x):
"""FMS(x)
input
-----
x: tensor, (batch, length, dim)
output
-----
y: tensor, (batch, length, dim)
"""
if x.shape[-1] != self.m_dim:
print("FMS expects data of dim {:d}".format(self.m_dim))
sys.exit(1)
# pooling expects (batch, dim, length)
# y will be (batch, dim, 1)
y = self.m_pooling(x.permute(0, 2, 1))
# squeeze to (batch, dim), unsqueeze to (batch, 1, dim, )
y = self.m_act(self.m_dim_change(y.squeeze(-1))).unsqueeze(1)
# scaling and shifting
return (x * y + y)
class Residual_block(torch_nn.Module):
"""Residual block used in RawNet2 for Anti-spoofing
"""
def __init__(self, nb_filts, flag_bn_input = False):
"""Residual_block(bn_filts, flga_bn_input)
Args
----
bn_filts: list of int, [input_channel, output_channel]
flag_bn_input: bool, whether do BatchNorm and LReLU
default False
"""
super(Residual_block, self).__init__()
# whether batch normalize input
if flag_bn_input:
self.bn1 = torch_nn.Sequential(
torch_nn.BatchNorm1d(num_features = nb_filts[0]),
torch_nn.LeakyReLU(negative_slope=0.3))
else:
self.bn1 = None
self.conv = torch_nn.Sequential(
torch_nn.Conv1d(in_channels = nb_filts[0],
out_channels = nb_filts[1],
kernel_size = 3,
padding = 1,
stride = 1),
torch_nn.BatchNorm1d(num_features = nb_filts[1]),
torch_nn.Conv1d(in_channels = nb_filts[1],
out_channels = nb_filts[1],
padding = 1,
kernel_size = 3,
stride = 1)
)
# for dimension change
if nb_filts[0] != nb_filts[1]:
self.dim_change = torch_nn.Conv1d(
in_channels = nb_filts[0],
out_channels = nb_filts[1],
padding = 0,
kernel_size = 1,
stride = 1)
else:
self.dim_change = None
# maxpooling
self.mp = torch_nn.MaxPool1d(3)
return
def forward(self, x):
""" y= Residual_block(x)
input
-----
x: tensor, (batchsize, length, dim)
output
------
y: tensor, (batchsize, length, dim2)
"""
identity = x.permute(0, 2, 1)
if self.bn1 is None:
out = x.permute(0, 2, 1)
else:
out = self.bn1(x.permute(0, 2, 1))
out = self.conv(out)
if self.dim_change is not None:
identity = self.dim_change(identity)
out += identity
out = self.mp(out)
return out.permute(0, 2, 1)
class RawNet(torch_nn.Module):
"""RawNet based on
https://github.com/asvspoof-challenge/2021/
"""
def __init__(self, num_sinc_filter, sinc_filter_len, in_dim, sampling_rate,
res_ch_1, res_ch_2, gru_node, gru_layer, emb_dim, num_class):
super(RawNet, self).__init__()
# sinc filter layer
self.m_sinc_conv = SincConv2(
num_sinc_filter,
kernel_size = sinc_filter_len,
in_channels = in_dim,
sample_rate = sampling_rate,
flag_pad = False,
flag_trainable=False)
# res block group
self.m_resgroup = torch_nn.Sequential(
nii_nn.BatchNorm1DWrapper(num_sinc_filter),
torch_nn.SELU(),
Residual_block([num_sinc_filter, res_ch_1], flag_bn_input=False),
FMS(res_ch_1),
Residual_block([res_ch_1, res_ch_1], flag_bn_input=True),
FMS(res_ch_1),
Residual_block([res_ch_1, res_ch_2], flag_bn_input=True),
FMS(res_ch_2),
Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),
FMS(res_ch_2),
Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),
FMS(res_ch_2),
Residual_block([res_ch_2, res_ch_2], flag_bn_input=True),
FMS(res_ch_2),
)
# GRU part
self.m_before_gru = torch_nn.Sequential(
nii_nn.BatchNorm1DWrapper(res_ch_2),
torch_nn.SELU()
)
self.m_gru = torch_nn.GRU(input_size = res_ch_2,
hidden_size = gru_node,
num_layers = gru_layer,
batch_first = True)
self.m_emb = torch_nn.Linear(in_features = gru_node,
out_features = emb_dim)
# output score
self.m_output = torch_nn.Linear(in_features = emb_dim,
out_features = num_class,
bias=True)
#
self.logsoftmax = torch_nn.LogSoftmax(dim=1)
return
def _compute_embedding(self, x):
"""
input
-----
x: tensor, (batch, length, dim)
output
------
y: tensor, (batch, emb_dim)
"""
batch, length, dim = x.shape
#
x = self.m_sinc_conv(x)
x = self.m_resgroup(x)
x, _ = self.m_gru(self.m_before_gru(x))
return self.m_emb(x[:, -1, :])
def _compute_score(self, emb, inference=True):
"""
input
-----
emb: tensor, (batch, emb_dim)
output
------
score: tensor, (batch, num_class)
Score here refers to
"""
# we should not use logsoftmax if we will use CrossEntropyLoss
flag_logsoftmax = False
if inference:
# no softmax
return self.m_output(emb)
elif flag_logsoftmax:
# Logsoftmax for training loss
# this is used when the training criterion is NLLoss
return self.logsoftmax(self.m_output(emb))
else:
return self.m_output(emb)
def forward(self, x):
"""
input
-----
x: tensor, (batch, length, dim)
output
------
y: tensor, (batch, num_class)
        y is the output activation (logits); use CrossEntropyLoss for training
"""
emb = self._compute_embedding(x)
return self._compute_score(emb, inference=False)
def inference(self, x):
"""
input
-----
x: tensor, (batch, length, dim)
output
------
y: tensor, (batch, num_class)
y is the input activation to softmax
"""
emb = self._compute_embedding(x)
return self._compute_score(emb, inference=True)
if __name__ == "__main__":
print("Definition of RawNet2")
| 13,819 | 31.28972 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/util_frontend.py | #!/usr/bin/env python
"""
util_frontend.py
Utilities for frontend feature extraction
It includes:
LFCC: based on asvspoof.org baseline matlab code
LFB: Linear filterbank feature
Chen, T., Kumar, A., Nagarsheth, P., Sivaraman, G. & Khoury, E.
Generalization of Audio Deepfake Detection. in Proc. Odyssey 132-137
(2020). doi:10.21437/Odyssey.2020-19
According to the author's email:
LFB = np.log(FilterBank(Amplitude(STFT(x))))
There is no DCT. But it does have logarithm.
Implemented based on LFCC API
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.util_dsp as nii_dsp
import core_scripts.data_io.conf as nii_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
##################
## other utilities
##################
def stft_wrapper(x, fft_n, frame_shift, frame_length, window,
pad_mode="constant", return_complex=False):
"""Due to the different signature of torch.stft, write a
wrapper to handle this
input
-----
x: tensor, waveform, (batch, length)
window: tensor, window coef, (frame_length, )
output
------
tensor: (batch, frame_num, bin_num, 2)
"""
# there are better ways, but for convenience
if torch.__version__.split('.')[1].isnumeric() and \
int(torch.__version__.split('.')[1]) < 7:
# torch 1.6.*
return torch.stft(x, fft_n, frame_shift, frame_length,
window=window, onesided=True, pad_mode=pad_mode)
else:
# torch > 1.7
return torch.stft(x, fft_n, frame_shift, frame_length,
window=window, onesided=True, pad_mode=pad_mode,
return_complex=return_complex)
def istft_wrapper(x, fft_n, frame_shift, frame_length, window,
pad_mode="constant"):
# there are better ways, but for convenience
if torch.__version__.split('.')[1].isnumeric() and \
int(torch.__version__.split('.')[1]) < 7:
# torch 1.6.*
return torch.istft(x, fft_n, frame_shift, frame_length,
window=window, onesided=True, pad_mode=pad_mode)
else:
# torch > 1.7
return torch.istft(x, fft_n, frame_shift, frame_length,
window=window, onesided=True)
def trimf(x, params):
"""
trimf: similar to Matlab definition
https://www.mathworks.com/help/fuzzy/trimf.html?s_tid=srchtitle
"""
if len(params) != 3:
print("trimp requires params to be a list of 3 elements")
sys.exit(1)
a = params[0]
b = params[1]
c = params[2]
if a > b or b > c:
print("trimp(x, [a, b, c]) requires a<=b<=c")
sys.exit(1)
y = torch.zeros_like(x, dtype=nii_conf.d_dtype)
if a < b:
index = torch.logical_and(a < x, x < b)
y[index] = (x[index] - a) / (b - a)
if b < c:
index = torch.logical_and(b < x, x < c)
y[index] = (c - x[index]) / (c - b)
y[x == b] = 1
return y
def delta(x):
""" By default
input
-----
x (batch, Length, dim)
output
------
output (batch, Length, dim)
Delta is calculated along Length dimension
"""
length = x.shape[1]
output = torch.zeros_like(x)
x_temp = torch_nn_func.pad(x.unsqueeze(1), (0, 0, 1, 1),
'replicate').squeeze(1)
output = -1 * x_temp[:, 0:length] + x_temp[:,2:]
return output
def linear_fb(fn, sr, filter_num):
"""linear_fb(fn, sr, filter_num)
    create linear filter bank based on trimf
input
-----
fn: int, FFT points
sr: int, sampling rate (Hz)
filter_num: int, number of filters in filter-bank
output
------
fb: tensor, (fn//2+1, filter_num)
Note that this filter bank is supposed to be used on
spectrum of dimension fn//2+1.
See example in LFCC.
"""
# build the triangle filter bank
f = (sr / 2) * torch.linspace(0, 1, fn//2+1)
filter_bands = torch.linspace(min(f), max(f), filter_num+2)
filter_bank = torch.zeros([fn//2+1, filter_num])
for idx in range(filter_num):
filter_bank[:, idx] = trimf(
f, [filter_bands[idx],
filter_bands[idx+1],
filter_bands[idx+2]])
return filter_bank
#################
## LFCC front-end
#################
class LFCC(torch_nn.Module):
""" Based on asvspoof.org baseline Matlab code.
Difference: with_energy is added to set the first dimension as energy
"""
def __init__(self, fl, fs, fn, sr, filter_num,
with_energy=False, with_emphasis=True,
with_delta=True, flag_for_LFB=False,
num_coef=None, min_freq=0, max_freq=1):
""" Initialize LFCC
Para:
-----
fl: int, frame length, (number of waveform points)
fs: int, frame shift, (number of waveform points)
fn: int, FFT points
sr: int, sampling rate (Hz)
filter_num: int, number of filters in filter-bank
with_energy: bool, (default False), whether replace 1st dim to energy
with_emphasis: bool, (default True), whether pre-emphaze input wav
with_delta: bool, (default True), whether use delta and delta-delta
        flag_for_LFB: bool (default False), reserved for LFB feature
num_coef: int or None, number of coeffs to be taken from filter bank.
Note that this is only used for LFCC, i.e., for_LFB=False
When None, num_coef will be equal to filter_num
min_freq: float (default 0), min_freq * sr // 2 will be the minimum
frequency of extracted FFT spectrum
max_freq: float (default 1), max_freq * sr // 2 will be the maximum
frequency of extracted FFT spectrum
"""
super(LFCC, self).__init__()
self.fl = fl
self.fs = fs
self.fn = fn
self.sr = sr
self.filter_num = filter_num
self.num_coef = num_coef
# decide the range of frequency bins
if min_freq >= 0 and min_freq < max_freq and max_freq <= 1:
self.min_freq_bin = int(min_freq * (fn//2+1))
self.max_freq_bin = int(max_freq * (fn//2+1))
self.num_fft_bins = self.max_freq_bin - self.min_freq_bin
else:
print("LFCC cannot work with min_freq {:f} and max_freq {:}".format(
min_freq, max_freq))
sys.exit(1)
# build the triangle filter bank
f = (sr / 2) * torch.linspace(min_freq, max_freq, self.num_fft_bins)
filter_bands = torch.linspace(min(f), max(f), filter_num+2)
filter_bank = torch.zeros([self.num_fft_bins, filter_num])
for idx in range(filter_num):
filter_bank[:, idx] = trimf(
f, [filter_bands[idx],
filter_bands[idx+1],
filter_bands[idx+2]])
self.lfcc_fb = torch_nn.Parameter(filter_bank, requires_grad=False)
# DCT as a linear transformation layer
self.l_dct = nii_dsp.LinearDCT(filter_num, 'dct', norm='ortho')
# opts
self.with_energy = with_energy
self.with_emphasis = with_emphasis
self.with_delta = with_delta
self.flag_for_LFB = flag_for_LFB
if self.num_coef is None:
self.num_coef = filter_num
# Add a buf to store window coefficients
#
self.window_buf = None
return
def forward(self, x):
"""
input:
------
x: tensor(batch, length), where length is waveform length
output:
-------
lfcc_output: tensor(batch, frame_num, dim_num)
"""
        # pre-emphasis
if self.with_emphasis:
# to avoid side effect
x_copy = torch.zeros_like(x) + x
x_copy[:, 1:] = x[:, 1:] - 0.97 * x[:, 0:-1]
else:
x_copy = x
if self.window_buf is None:
self.window_buf = torch.hamming_window(self.fl).to(x.device)
# STFT
#x_stft = torch.stft(x_copy, self.fn, self.fs, self.fl,
# window=torch.hamming_window(self.fl).to(x.device),
# onesided=True, pad_mode="constant")
x_stft = stft_wrapper(x_copy, self.fn, self.fs, self.fl,self.window_buf)
        # power spectrum (squared STFT magnitude)
sp_amp = torch.norm(x_stft, 2, -1).pow(2).permute(0, 2, 1).contiguous()
if self.min_freq_bin > 0 or self.max_freq_bin < (self.fn//2+1):
sp_amp = sp_amp[:, :, self.min_freq_bin:self.max_freq_bin]
# filter bank
fb_feature = torch.log10(torch.matmul(sp_amp, self.lfcc_fb) +
torch.finfo(torch.float32).eps)
# DCT (if necessary, remove DCT)
lfcc = self.l_dct(fb_feature) if not self.flag_for_LFB else fb_feature
# Truncate the output of l_dct when necessary
if not self.flag_for_LFB and self.num_coef != self.filter_num:
lfcc = lfcc[:, :, :self.num_coef]
# Add energy
if self.with_energy:
power_spec = sp_amp / self.fn
energy = torch.log10(power_spec.sum(axis=2)+
torch.finfo(torch.float32).eps)
lfcc[:, :, 0] = energy
# Add delta coefficients
if self.with_delta:
lfcc_delta = delta(lfcc)
lfcc_delta_delta = delta(lfcc_delta)
lfcc_output = torch.cat((lfcc, lfcc_delta, lfcc_delta_delta), 2)
else:
lfcc_output = lfcc
# done
return lfcc_output
#################
## LFB front-end
#################
class LFB(LFCC):
""" Linear filterbank feature
Chen, T., Kumar, A., Nagarsheth, P., Sivaraman, G. & Khoury, E.
Generalization of Audio Deepfake Detection. in Proc. Odyssey 132-137
(2020). doi:10.21437/Odyssey.2020-19
"""
def __init__(self, fl, fs, fn, sr, filter_num,
with_energy=False, with_emphasis=True,
with_delta=False):
""" Initialize LFB
Para:
-----
fl: int, frame length, (number of waveform points)
fs: int, frame shift, (number of waveform points)
fn: int, FFT points
sr: int, sampling rate (Hz)
filter_num: int, number of filters in filter-bank
with_energy: bool, (default False), whether replace 1st dim to energy
with_emphasis: bool, (default True), whether pre-emphaze input wav
with_delta: bool, (default True), whether use delta and delta-delta
"""
super(LFB, self).__init__(fl, fs, fn, sr, filter_num, with_energy,
with_emphasis, with_delta, flag_for_LFB=True)
return
def forward(self, x):
"""
input:
------
x: tensor(batch, length), where length is waveform length
output:
-------
lfb_output: tensor(batch, frame_num, dim_num)
"""
return super(LFB, self).forward(x)
#################
## Spectrogram (FFT) front-end
#################
class Spectrogram(torch_nn.Module):
""" Spectrogram front-end
"""
def __init__(self, fl, fs, fn, sr,
with_emphasis=True, with_delta=False, in_db=False):
""" Initialize LFCC
Para:
-----
fl: int, frame length, (number of waveform points)
fs: int, frame shift, (number of waveform points)
fn: int, FFT points
sr: int, sampling rate (Hz)
with_emphasis: bool, (default True), whether pre-emphaze input wav
with_delta: bool, (default False), whether use delta and delta-delta
        in_db: bool, (default False), use log10(power spectrum)? if False,
               use the power spectrum directly
"""
super(Spectrogram, self).__init__()
self.fl = fl
self.fs = fs
self.fn = fn
self.sr = sr
# opts
self.with_emphasis = with_emphasis
self.with_delta = with_delta
self.in_db = in_db
# buf to store window coefficients
self.window_buf = None
return
def forward(self, x):
"""
input:
------
x: tensor(batch, length), where length is waveform length
output:
-------
        sp_output: tensor(batch, frame_num, dim_num)
"""
        # pre-emphasis
if self.with_emphasis:
x[:, 1:] = x[:, 1:] - 0.97 * x[:, 0:-1]
if self.window_buf is None:
self.window_buf = torch.hamming_window(self.fl).to(x.device)
# STFT
#x_stft = torch.stft(x, self.fn, self.fs, self.fl,
# window=torch.hamming_window(self.fl).to(x.device),
# onesided=True, pad_mode="constant")
x_stft = stft_wrapper(x, self.fn, self.fs, self.fl, self.window_buf)
        # power spectrum (squared STFT magnitude)
sp_amp = torch.norm(x_stft, 2, -1).pow(2).permute(0, 2, 1).contiguous()
if self.in_db:
sp_amp = torch.log10(sp_amp + torch.finfo(torch.float32).eps)
# Add delta coefficients
if self.with_delta:
sp_delta = delta(sp_amp)
sp_delta_delta = delta(sp_delta)
sp_output = torch.cat((sp_amp, sp_delta, sp_delta_delta), 2)
else:
sp_output = sp_amp
# done
        return sp_output
#################
## MFCC front-end
#################
from core_scripts.data_io import dsp_tools
class MFCC(torch_nn.Module):
""" Based on asvspoof.org baseline Matlab code.
Difference: with_energy is added to set the first dimension as energy
"""
def __init__(self, fl, fs, fn, sr, filter_num,
with_energy=False, with_emphasis=True,
with_delta=True, flag_for_MelSpec=False,
num_coef=None, min_freq=0, max_freq=1):
""" Initialize LFCC
Para:
-----
fl: int, frame length, (number of waveform points)
fs: int, frame shift, (number of waveform points)
fn: int, FFT points
sr: int, sampling rate (Hz)
filter_num: int, number of filters in filter-bank
with_energy: bool, (default False), whether replace 1st dim to energy
with_emphasis: bool, (default True), whether pre-emphaze input wav
with_delta: bool, (default True), whether use delta and delta-delta
        flag_for_MelSpec: bool (default False), reserved for Mel-spectrogram output
num_coef: int or None, number of coeffs to be taken from filter bank.
                  Note that this is only used for MFCC, i.e., flag_for_MelSpec=False
When None, num_coef will be equal to filter_num
min_freq: float (default 0), min_freq * sr // 2 will be the minimum
frequency of extracted FFT spectrum
max_freq: float (default 1), max_freq * sr // 2 will be the maximum
frequency of extracted FFT spectrum
"""
super(MFCC, self).__init__()
self.fl = fl
self.fs = fs
self.fn = fn
self.sr = sr
self.filter_num = filter_num
self.num_coef = num_coef
# decide the range of frequency bins
if min_freq >= 0 and min_freq < max_freq and max_freq <= 1:
pass
else:
print("MFCC cannot work with min_freq {:f} and max_freq {:}".format(
min_freq, max_freq))
sys.exit(1)
# opts
self.with_energy = with_energy
self.with_emphasis = with_emphasis
self.with_delta = with_delta
self.flag_for_MelSpec = flag_for_MelSpec
if self.num_coef is None:
self.num_coef = filter_num
# get filter bank
tmp_config = dsp_tools.Melspec(sr, fl, fs, fn, filter_num,
sr/2*min_freq, sr/2*max_freq)
filter_bank = torch.tensor(tmp_config.melfb.T, dtype=nii_conf.d_dtype)
self.mel_fb = torch_nn.Parameter(filter_bank, requires_grad=False)
# DCT as a linear transformation layer
if not self.flag_for_MelSpec:
self.l_dct = nii_dsp.LinearDCT(filter_num, 'dct', norm='ortho')
else:
self.l_dct = None
# Add a buf to store window coefficients
#
self.window_buf = None
return
def forward(self, x):
"""
input:
------
x: tensor(batch, length), where length is waveform length
output:
-------
        output: tensor(batch, frame_num, dim_num)
"""
        # pre-emphasis
if self.with_emphasis:
# to avoid side effect
x_copy = torch.zeros_like(x) + x
x_copy[:, 1:] = x[:, 1:] - 0.97 * x[:, 0:-1]
else:
x_copy = x
if self.window_buf is None:
self.window_buf = torch.hamming_window(self.fl).to(x.device)
# STFT
x_stft = stft_wrapper(x_copy,
self.fn, self.fs, self.fl, self.window_buf)
        # power spectrum (squared STFT magnitude)
sp_amp = torch.norm(x_stft, 2, -1).pow(2).permute(0, 2, 1).contiguous()
# filter bank
fb_feature = torch.log10(torch.matmul(sp_amp, self.mel_fb) +
torch.finfo(torch.float32).eps)
# DCT (if necessary, remove DCT)
if not self.flag_for_MelSpec:
output = self.l_dct(fb_feature)
else:
output = fb_feature
# Truncate the output of l_dct when necessary
if not self.flag_for_MelSpec and self.num_coef != self.filter_num:
output = output[:, :, :self.num_coef]
# Add energy
if self.with_energy:
power_spec = sp_amp / self.fn
energy = torch.log10(power_spec.sum(axis=2)+
torch.finfo(torch.float32).eps)
output[:, :, 0] = energy
# Add delta coefficients
if self.with_delta:
output_delta = delta(output)
output_delta_delta = delta(output_delta)
output = torch.cat((output, output_delta, output_delta_delta), 2)
else:
pass
# done
return output
#######################
# spectral subtraction
#######################
def spectral_substraction(input_wav, noise_wav, ratio = 0.1,
fft_n = 512, frame_shift = 256, frame_length = 512):
"""
    output = spectral_substraction(input_wav, noise_wav)
input
-----
input_wav: tensor, (batch, length1, 1)
noise_wav: tensor, (batch, length2, 1)
ratio: float, default 0.1, ratio to be multiplied with noise spectrum
    fft_n: int, default 512, fft length
    frame_shift: int, default 256, frame shift
    frame_length: int, default 512, frame length
output
------
output: tensor, de-noised waveform
    Note: this function does spectral subtraction.
    noise_wav does not need to have the same length as input_wav.
    The spectral amplitude of noise_wav is averaged over frames and then
    subtracted from the STFT spectral amplitude of input_wav
"""
window = torch.hamming_window(frame_length).to(input_wav.device)
# stft
input_spec = stft_wrapper(
input_wav.squeeze(-1), fft_n, frame_shift, frame_length, window,
return_complex=True)
noise_spec = stft_wrapper(
noise_wav.squeeze(-1), fft_n, frame_shift, frame_length, window,
return_complex=True)
# input wave spectral amplitude and phase (batch, fft_n//2+1, length, )
input_spec_abs = torch.abs(input_spec)
input_spec_pha = torch.angle(input_spec)
# noise spectral, averaged
noise_spec_abs = torch.abs(noise_spec).mean(axis=-1).unsqueeze(-1)
# spectral subtraction
denoi_spec_abs = input_spec_abs - noise_spec_abs * ratio
denoi_spec_abs = torch.clamp(denoi_spec_abs, min=0.00000001)
# recover waveform
input_spec = torch.complex(
denoi_spec_abs * torch.cos(input_spec_pha),
denoi_spec_abs * torch.sin(input_spec_pha),
)
output = istft_wrapper(
input_spec, fft_n, frame_shift, frame_length, window)
# adjust waveform length
length = min([input_wav.shape[1], output.shape[1]])
output_new = torch.zeros_like(input_wav)
output_new[:, 0:length, 0] = output[:, 0:length]
return output_new
if __name__ == "__main__":
print("Definition of front-end for Anti-spoofing")
| 21,157 | 32.163009 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_waveglow.py | #!/usr/bin/env python
"""
Building blocks for waveglow
"""
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_nn as nii_nn
import sandbox.block_wavenet as nii_wavenet
import sandbox.block_glow as nii_glow
import core_scripts.data_io.conf as nii_io_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
class Invertible1x1ConvWaveGlow(torch.nn.Module):
def __init__(self, feat_dim, flag_detjac=False):
super(Invertible1x1ConvWaveGlow, self).__init__()
torch.manual_seed(100)
with torch.no_grad():
W = torch.qr(torch.FloatTensor(feat_dim, feat_dim).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
# not necessary
W = W.transpose(0, 1)
self.weight = torch_nn.Parameter(W)
self.weight_inv = torch_nn.Parameter(W.clone())
self.weight_inv_flag = False
self.flag_detjac = flag_detjac
return
def forward(self, y, factor):
batch_size, length, feat_dim = y.size()
# Forward computation
log_det_W = length / factor * torch.logdet(self.weight)
z = torch.matmul(y, self.weight)
if self.flag_detjac:
return z, log_det_W
else:
return z
def reverse(self, x):
if not self.weight_inv_flag:
self.weight_inv.data = torch.inverse(self.weight.data)
self.weight_inv_flag = True
return torch.matmul(x, self.weight_inv)
class upsampleByTransConv(torch_nn.Module):
"""upsampleByTransConv
Upsampling layer using transposed convolution
"""
def __init__(self, feat_dim, upsample_rate, window_ratio=5):
"""upsampleByTransConv(feat_dim, upsample_rate, window_ratio=5)
Args
----
feat_dim: int, input feature should be (batch, length, feat_dim)
upsample_rate, int, output feature will be
(batch, length*upsample_rate, feat_dim)
window_ratio: int, default 5, window length of transconv will be
upsample_rate * window_ratio
"""
super(upsampleByTransConv, self).__init__()
window_l = upsample_rate * window_ratio
self.m_layer = torch_nn.ConvTranspose1d(
feat_dim, feat_dim, window_l, stride=upsample_rate)
self.m_uprate = upsample_rate
return
def forward(self, x):
""" y = upsampleByTransConv(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length*upsample_rate, feat_dim)
"""
l = x.shape[1] * self.m_uprate
y = self.m_layer(x.permute(0, 2, 1))[:, :, 0:l]
return y.permute(0, 2, 1).contiguous()
class SqueezeForWaveGlow(torch_nn.Module):
"""SqueezeForWaveGlow
Squeeze layer for WaveGlow
"""
def __init__(self, mode = 1):
"""SqueezeForGlow(mode=1)
Args
----
mode: int, mode of this squeeze layer
mode == 1: original squeeze method by squeezing 8 points
"""
super(SqueezeForWaveGlow, self).__init__()
self.m_mode = mode
# mode 1, squeeze by 8
self.m_mode_1_para = 8
return
def get_expected_squeeze_length(self, orig_length):
# return expected length after squeezing
if self.m_mode == 1:
return orig_length//self.m_mode_1_para
def get_squeeze_factor(self):
# return the configuration for squeezing
if self.m_mode == 1:
return self.m_mode_1_para
def forward(self, x):
"""SqueezeForWaveGlow(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length // squeeze, feat_dim * squeeze)
"""
if self.m_mode == 1:
# squeeze, the 8 points should be the last dimension
squeeze_len = x.shape[1] // self.m_mode_1_para
# trim length first
trim_len = squeeze_len * self.m_mode_1_para
x_tmp = x[:, 0:trim_len, :]
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.view(x_tmp.shape[0], squeeze_len,
self.m_mode_1_para, -1)
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time//squeeze_size, dim * squeeze_size)
return x_tmp.view(x_tmp.shape[0], squeeze_len, -1)
        else:
            print("SqueezeForWaveGlow not implemented")
            sys.exit(1)
def reverse(self, x_squeezed):
if self.m_mode == 1:
# (batch, time//squeeze_size, dim * squeeze_size)
batch, squeeze_len, squeeze_dim = x_squeezed.shape
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_squeezed.view(
batch, squeeze_len, squeeze_dim // self.m_mode_1_para,
self.m_mode_1_para)
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time, dim)
x = x_tmp.view(batch, squeeze_len * self.m_mode_1_para, -1)
        else:
            print("SqueezeForWaveGlow not implemented")
            sys.exit(1)
return x
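# A small optional demo (not part of the original file; the function name
# below is illustrative only). It round-trips random data through the two
# layers defined above with assumed feature sizes; it is not run on import.
def _demo_upsample_and_squeeze():
    with torch.no_grad():
        # upsample (batch, 10, 4) -> (batch, 10 * 80, 4)
        up_layer = upsampleByTransConv(4, 80)
        feat = torch.randn([2, 10, 4])
        print(up_layer(feat).shape)
        # squeeze (batch, 160, 4) -> (batch, 20, 32) and invert it
        squeeze_layer = SqueezeForWaveGlow()
        wav = torch.randn([2, 160, 4])
        wav_squeezed = squeeze_layer(wav)
        print(wav_squeezed.shape)
        print(torch.std(squeeze_layer.reverse(wav_squeezed) - wav))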
class AffineCouplingWaveGlow_legacy(torch_nn.Module):
"""AffineCouplingWaveGlow_legacy
AffineCoupling block in WaveGlow
Example:
m_tmp = AffineCouplingWaveGlow_legacy(10, 10, 8, 512, 3, True, True)
data1 = torch.randn([2, 100, 10])
cond = torch.randn([2, 100, 10])
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False):
"""AffineCouplingWaveGlow_legacy(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False)
Args:
-----
in_dim: int, dim of input audio data (batch, length, in_dim)
cond_dim, int, dim of condition feature (batch, length, cond_dim)
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dime of the WaveNet residual & skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
flag_detjac: bool, whether return the determinant of Jacobian,
default False
        y -> split() -> y1, y2 -> concat([y1, (y2 + bias) * scale])
When flag_affine == True, y1 -> H() -> scale, bias
When flag_affine == False, y1 -> H() -> bias, scale=1
Here, H() is WaveNet blocks (dilated conv + gated activation)
"""
super(AffineCouplingWaveGlow_legacy, self).__init__()
self.flag_affine = flag_affine
self.flag_detjac = flag_detjac
if in_dim % 2 > 0:
print("AffineCoulingGlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
if self.flag_affine:
# scale and bias
self.m_nn_outdim = in_dim // 2 * 2
else:
# only bias
self.m_nn_outdim = in_dim // 2
# pre-transform, change input audio dimension
# only half of the features will be used to produce scale and bias
tmp_l = torch_nn.Linear(in_dim // 2, wn_dim_channel)
# weight normalization
self.m_wn_pre = torch_nn.utils.weight_norm(tmp_l, name='weight')
# WaveNet blocks (dilated conv, gated activation functions)
tmp_wn = []
for i in range(wn_num_conv1d):
dilation = 2 ** i
tmp_wn.append(nii_wavenet.WaveNetBlock_v2(
wn_dim_channel, wn_dim_channel, wn_dim_channel, cond_dim,
dilation, cnn_kernel_size=wn_kernel_size, causal=False))
self.m_wn = torch_nn.ModuleList(tmp_wn)
# post-transform, change dim from WN channel to audio feature
tmp_l = torch_nn.Linear(wn_dim_channel, self.m_nn_outdim)
# For better initialization, bias=0, scale=1 for first mini-batch
tmp_l.weight.data.zero_()
tmp_l.bias.data.zero_()
self.m_wn_post = tmp_l
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return nii_glow.sum_over_keep_batch(log_scale / factor)
def _nn_trans(self, y1, cond):
"""_nn_trans(self, y1, cond)
input
-----
y1: tensor, input feature, (batch, length, input_dim//2)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
scale: tensor, (batch, length, input_dim // 2)
bias: tensor, (batch, length, input_dim // 2)
log_scale: tensor, (batch, length, input_dim // 2)
Affine transformation can be done by scale * feature + bias
log_scale is used for det Jacobian computation
"""
# pre-transformation (batch, length, in_dim//2)
# -> (batch, length, WN_channel)
y1_trans = self.m_wn_pre(y1)
# WaveNet blocks
wn_output = 0
res_ch = y1_trans
for wn_layer in self.m_wn:
res_ch, ski_ch = wn_layer(res_ch, cond)
wn_output = wn_output + ski_ch / len(self.m_wn)
#wn_output = wn_output + res_ch / len(self.m_wn)
# post-transformation
y1_tmp = self.m_wn_post(wn_output)
if self.flag_affine:
log_scale, bias = y1_tmp.chunk(2, -1)
scale = torch.exp(log_scale)
else:
bias = y1_tmp
scale = torch.ones_like(y1)
log_scale = torch.zeros_like(y1)
return scale, bias, log_scale
def forward(self, y, cond, factor=1):
"""AffineCouplingWaveGlow_legacy.forward(y, cond)
input
-----
y: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
y1, y2 = split(y)
scale, bias = WN(y1)
x2 = y2 * scale + bias or (y2 + bias) * scale
return [y1, x2]
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1, cond)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x, cond):
"""AffineCouplingWaveGlow_legacy.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
y: tensor, output feature, (batch, length, input_dim)
x1, x2 = split(x)
scale, bias = WN(x1)
y2 = x2 / scale - bias
return [x1, y2]
"""
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1, cond)
y2 = x2 / scale - bias
return torch.cat([y1, y2], dim=-1)
class WaveNetModuleForNonAR(torch_nn.Module):
"""WaveNetModuleWaveGlow
Cascade of multiple WaveNet blocks:
x -> ExpandDim -> conv1 -> gated -> res -> conv1 -> gated -> res ...
^ |
| v
cond skip
output = sum(skip_channels)
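Example (illustrative sketch, not part of the original code; the dims
below are assumptions chosen only to show the expected shapes):
m_wn = WaveNetModuleForNonAR(4, 8, 4, 3, 64, 32, 32)
x = torch.randn([2, 100, 4])
cond = torch.randn([2, 100, 8])
out = m_wn(x, cond)   # (2, 100, 4)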
"""
def __init__(self, input_dim, cond_dim, out_dim, n_blocks,
gate_dim, res_ch, skip_ch, kernel_size=3):
super(WaveNetModuleForNonAR, self).__init__()
self.m_block_num = n_blocks
self.m_res_ch_dim = res_ch
self.m_skip_ch_dim = skip_ch
self.m_gate_dim = gate_dim
self.m_kernel_size = kernel_size
self.m_n_blocks = n_blocks
if self.m_gate_dim % 2 != 0:
self.m_gate_dim = self.m_gate_dim // 2 * 2
# input dimension expanding
tmp = torch_nn.Conv1d(input_dim, res_ch, 1)
self.l_expand = torch_nn.utils.weight_norm(tmp, name='weight')
# end dimension compressing
tmp = torch_nn.Conv1d(skip_ch, out_dim, 1)
tmp.weight.data.zero_()
tmp.bias.data.zero_()
self.l_compress = tmp
# dilated convolution and residual-skip-channel transformation
self.l_conv1 = []
self.l_resskip = []
for idx in range(n_blocks):
dilation = 2 ** idx
padding = int((kernel_size * dilation - dilation)/2)
conv1 = torch_nn.Conv1d(
res_ch, gate_dim, self.m_kernel_size,
dilation = dilation, padding=padding)
conv1 = torch_nn.utils.weight_norm(conv1, name='weight')
self.l_conv1.append(conv1)
if idx < n_blocks - 1:
outdim = self.m_res_ch_dim + self.m_skip_ch_dim
else:
outdim = self.m_skip_ch_dim
resskip = torch_nn.Conv1d(self.m_gate_dim//2, outdim, 1)
resskip = torch_nn.utils.weight_norm(resskip, name='weight')
self.l_resskip.append(resskip)
self.l_conv1 = torch_nn.ModuleList(self.l_conv1)
self.l_resskip = torch_nn.ModuleList(self.l_resskip)
# a single conditional feature transformation layer
cond_layer = torch_nn.Conv1d(cond_dim, gate_dim * n_blocks, 1)
cond_layer = torch_nn.utils.weight_norm(cond_layer, name='weight')
self.l_cond = cond_layer
return
def forward(self, x, cond):
"""
"""
# input feature expansion
# change the format to (batch, dimension, length)
x_expanded = self.l_expand(x.permute(0, 2, 1))
# condition feature transformation
cond_proc = self.l_cond(cond.permute(0, 2, 1))
# skip-channel accumulation
skip_ch_out = 0
conv_input = x_expanded
for idx, (l_conv1, l_resskip) in \
enumerate(zip(self.l_conv1, self.l_resskip)):
tmp_dim = idx * self.m_gate_dim
# condition feature of this layer
cond_tmp = cond_proc[:, tmp_dim : tmp_dim + self.m_gate_dim, :]
# conv transformed
conv_tmp = l_conv1(conv_input)
# gated activation
gated_tmp = cond_tmp + conv_tmp
t_part = torch.tanh(gated_tmp[:, :self.m_gate_dim//2, :])
s_part = torch.sigmoid(gated_tmp[:, self.m_gate_dim//2:, :])
gated_tmp = t_part * s_part
# transformation into skip / residual channels
resskip_tmp = l_resskip(gated_tmp)
# reschannel
if idx == self.m_n_blocks - 1:
skip_ch_out = skip_ch_out + resskip_tmp
else:
conv_input = conv_input + resskip_tmp[:, 0:self.m_res_ch_dim, :]
skip_ch_out = skip_ch_out + resskip_tmp[:, self.m_res_ch_dim:,:]
output = self.l_compress(skip_ch_out)
# permute back to (batch, length, dimension)
return output.permute(0, 2, 1)
class AffineCouplingWaveGlow(torch_nn.Module):
"""AffineCouplingWaveGlow
AffineCoupling block in WaveGlow
Example:
m_tmp = AffineCouplingWaveGlow(10, 10, 8, 512, 3, True, True)
data1 = torch.randn([2, 100, 10])
cond = torch.randn([2, 100, 10])
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False):
"""AffineCouplingWaveGlow(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False)
Args:
-----
in_dim: int, dim of input audio data (batch, length, in_dim)
cond_dim, int, dim of condition feature (batch, length, cond_dim)
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dim of the WaveNet residual & skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
flag_detjac: bool, whether return the determinant of Jacobian,
default False
y -> split() -> y1, y2 -> concate([y1, (y2+bias) * scale])
When flag_affine == True, y1 -> H() -> scale, bias
When flag_affine == False, y1 -> H() -> bias, scale=1
Here, H() is WaveNet blocks (dilated conv + gated activation)
"""
super(AffineCouplingWaveGlow, self).__init__()
self.flag_affine = flag_affine
self.flag_detjac = flag_detjac
if in_dim % 2 > 0:
print("AffineCoulingGlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
if self.flag_affine:
# scale and bias
self.m_nn_outdim = in_dim // 2 * 2
else:
# only bias
self.m_nn_outdim = in_dim // 2
# WaveNet blocks (dilated conv, gated activation functions)
self.m_wn = WaveNetModuleForNonAR(
in_dim // 2, cond_dim, self.m_nn_outdim, wn_num_conv1d,
wn_dim_channel * 2, wn_dim_channel, wn_dim_channel,
wn_kernel_size
)
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return nii_glow.sum_over_keep_batch(log_scale / factor)
def _nn_trans(self, y1, cond):
"""_nn_trans(self, y1, cond)
input
-----
y1: tensor, input feature, (batch, length, input_dim//2)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
scale: tensor, (batch, length, input_dim // 2)
bias: tensor, (batch, length, input_dim // 2)
log_scale: tensor, (batch, length, input_dim // 2)
Affine transformation can be done by scale * feature + bias
log_scale is used for det Jacobian computation
"""
y1_tmp = self.m_wn(y1, cond)
if self.flag_affine:
log_scale, bias = y1_tmp.chunk(2, -1)
scale = torch.exp(log_scale)
else:
bias = y1_tmp
scale = torch.ones_like(y1)
log_scale = torch.zeros_like(y1)
return scale, bias, log_scale
def forward(self, y, cond, factor=1):
"""AffineCouplingWaveGlow.forward(y, cond)
input
-----
y: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
y1, y2 = split(y)
scale, bias = WN(y1)
x2 = y2 * scale + bias or (y2 + bias) * scale
return [y1, x2]
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1, cond)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x, cond):
"""AffineCouplingWaveGlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
y: tensor, output feature, (batch, length, input_dim)
x1, x2 = split(x)
scale, bias = WN(x1)
y2 = x2 / scale - bias
return [x1, y2]
"""
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1, cond)
y2 = x2 / scale - bias
return torch.cat([y1, y2], dim=-1)
class FlowStepWaveGlow(torch_nn.Module):
"""FlowStepWaveGlow
One flow step for waveglow
y -> invertible_1x1() -> AffineCoupling -> x
Example
m_tmp = FlowStepWaveGlow(10, 10, 8, 512, 3, flag_affine=True)
data1 = torch.randn([2, 100, 10])
cond = torch.randn([2, 100, 10])
output, log_det = m_tmp(data1, cond)
data1_re = m_tmp.reverse(output, cond)
torch.std(data1 - data1_re)
"""
def __init__(self, in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_affine_block_legacy=False):
"""FlowStepWaveGlow(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_affine_block_legacy=False)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
wn_num_conv1d: int, number of 1Dconv WaveNet block in this flow step
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
flag_affine_block_legacy, bool, whether use AffineCouplingWaveGlow or
AffineCouplingWaveGlow_legacy.
For wn_dim_channel and wn_kernel_size, see AffineCouplingWaveGlow
For flag_affine == False, scale will be 1.0
"""
super(FlowStepWaveGlow, self).__init__()
# Invertible transformation layer
#self.m_invtrans = nii_glow.InvertibleTrans(in_dim, flag_detjac=True)
self.m_invtrans = Invertible1x1ConvWaveGlow(in_dim, flag_detjac=True)
# Coupling layer
if flag_affine_block_legacy:
self.m_coupling = AffineCouplingWaveGlow_legacy(
in_dim, cond_dim, wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_detjac=True)
else:
self.m_coupling = AffineCouplingWaveGlow(
in_dim, cond_dim, wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_detjac=True)
return
def forward(self, y, cond, factor=1):
"""FlowStepWaveGlow.forward(y, cond, factor=1)
input
-----
y: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
factor: int, this is used to divide the likelihood, default 1.
If we directly sum all det-Jacobian terms, they can become very
large; however, we cannot simply average them over y because y
may have a different shape from the actual data
output
------
x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
"""
# 1x1 transform
x_tmp, log_det_1 = self.m_invtrans(y, factor)
# coupling
x_tmp, log_det_2 = self.m_coupling(x_tmp, cond, factor)
return x_tmp, log_det_1 + log_det_2
def reverse(self, x, cond):
"""FlowStepWaveGlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
y: tensor, output feature, (batch, length, input_dim)
"""
y_tmp = self.m_coupling.reverse(x, cond)
y_tmp = self.m_invtrans.reverse(y_tmp)
return y_tmp
class WaveGlowBlock(torch_nn.Module):
"""WaveGlowBlock
A WaveGlowBlock includes multiple steps of flow.
The Nvidia WaveGlow does not define WaveGlowBlock but directly
defines 12 flow steps. However, after every 4 flow steps, two
dimensions of z will be extracted (multi-scale approach).
It is not convenient to decide when to extract z.
Here, we define a WaveGlowBlock as the cascade of multiple flow
steps, and this WaveGlowBlock can extract the two dimensions from
the output of the final flow step.
Example:
data1 = torch.randn([2, 10, 10])
cond = torch.randn([2, 10, 16])
m_block = WaveGlowBlock(10, 16, 5, 8, 512, 3)
x, z, log_det = m_block(data1, cond)
data_re = m_block.reverse(x, z, cond)
print(torch.std(data_re - data1))
"""
def __init__(self, in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True,
flag_split = False,
flag_final_block=False,
split_dim = 2,
flag_affine_block_legacy=False):
"""WaveGlowBlock(in_dim, cond_dim, n_flow_steps,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_split = False, split_dim = 2,
flag_affine_block_legacy=False)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
n_flow_steps: int, number of flow steps in one block
wn_num_conv1d: int, number of dilated conv WaveNet blocks
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
flag_split: bool, whether split output z for multi-scale structure
default True
flag_final_block: bool, whether this block is the final block
default False
split_dim: int, if flag_split==True, z[:, :, :split_dim] will be
extracted, z[:, :, split_dim:] can be used for the next
WaveGlowBlock
flag_affine_block_legacy, bool, whether use the legacy implementation
of the wavenet-based affine transformation layer
default False.
For wn_dim_channel and wn_kernel_size, see AffineCouplingWaveGlow
For flag_affine, see AffineCouplingWaveGlow
"""
super(WaveGlowBlock, self).__init__()
tmp_flows = []
for i in range(n_flow_steps):
tmp_flows.append(
FlowStepWaveGlow(
in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine, flag_affine_block_legacy))
self.m_flows = torch_nn.ModuleList(tmp_flows)
self.flag_split = flag_split
self.flag_final_block = flag_final_block
self.split_dim = split_dim
if self.flag_split and self.flag_final_block:
print("WaveGlowBlock: flag_split and flag_final_block are True")
print("This is unexpected. Please check model definition")
sys.exit(1)
if self.flag_split and self.split_dim <= 0:
print("WaveGlowBlock: split_dim should be > 0")
sys.exit(1)
return
def forward(self, y, cond, factor=1):
"""x, z, log_detjac = WaveGlowBlock(y)
y -> H() -> [z, x], log_det_jacobian
H() consists of multiple flow steps (1x1conv + AffineCoupling)
input
-----
y: tensor, (batch, length, dim)
cond, tensor, (batch, length, cond_dim)
factor, None or int, this is used to divide the likelihood, default 1
output
------
log_detjac: tensor or scalar
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None, no input to the next block
z: tensor, (batch, length, dim), for N(z; 0, I)
else:
x: tensor, (batch, length, dim),
z: None, no latent for N(z; 0, I) from this block
concate([x,z]) should have the same size as y
"""
# flows
log_detjac = 0
x_tmp = y
for l_flow in self.m_flows:
x_tmp, log_detjac_tmp = l_flow(x_tmp, cond, factor)
log_detjac = log_detjac + log_detjac_tmp
if self.flag_split:
z = x_tmp[:, :, :self.split_dim]
x = x_tmp[:, :, self.split_dim:]
else:
if self.flag_final_block:
z = x_tmp
x = None
else:
z = None
x = x_tmp
return x, z, log_detjac
def reverse(self, x, z, cond):
"""y = WaveGlowBlock.reverse(x, z, cond)
[z, x] -> H^{-1}() -> y
input
-----
if self.flag_split:
x: tensor, (batch, length, in_dim - split_dim),
z: tensor, (batch, length, split_dim),
else:
if self.flag_final_block:
x: None
z: tensor, (batch, length, in_dim)
else:
x: tensor, (batch, length, in_dim)
z: None
output
------
y: tensor, (batch, length, in_dim)
"""
if self.flag_split:
if x is None or z is None:
print("WaveGlowBlock.reverse: x and z should not be None")
sys.exit(1)
y_tmp = torch.cat([z, x], dim=-1)
else:
if self.flag_final_block:
if z is None:
print("WaveGlowBlock.reverse: z should not be None")
sys.exit(1)
y_tmp = z
else:
if x is None:
print("WaveGlowBlock.reverse: x should not be None")
sys.exit(1)
y_tmp = x
for l_flow in self.m_flows[::-1]:
# affine
y_tmp = l_flow.reverse(y_tmp, cond)
return y_tmp
class WaveGlow(torch_nn.Module):
"""WaveGlow
Example
cond_dim = 4
upsample = 80
num_blocks = 4
num_flows_inblock = 5
wn_num_conv1d = 8
wn_dim_channel = 512
wn_kernel_size = 3
# waveforms of length 1600
wave1 = torch.randn([2, 1600, 1])
# condition feature
cond = torch.randn([2, 1600//upsample, cond_dim])
# model
m_model = nii_waveglow.WaveGlow(
cond_dim, upsample,
num_blocks, num_flows_inblock, wn_num_conv1d,
wn_dim_channel, wn_kernel_size)
# forward computation, neg_log = -(logp + log_detjac)
# neg_log.backward() can be used for backward
z, neg_log, logp, log_detjac = m_model(wave1, cond)
# recover the signal
wave2 = m_model.reverse(z, cond)
# check difference between original wave and recovered wave
print(torch.std(wave1 - wave2))
"""
def __init__(self, cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False):
"""WaveGlow(cond_dim, upsample_rate,
num_blocks, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine = True,
early_hid_dim=2,
flag_affine_block_legacy=False)
Args
----
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
upsample_rate: int, up-sampling rate for condition features
num_blocks: int, number of WaveGlowBlocks
num_flows_inblock: int, number of flow steps in one WaveGlowBlock
wn_num_conv1d: int, number of 1Dconv WaveNet block in this flow step
wn_dim_channel: int, dim of the WaveNet residual and skip channels
wn_kernel_size: int, kernel size of the dilated convolution layers
flag_affine: bool, whether use affine or additive transformation?
default True
early_hid_dim: int, dimension for z_1, z_2 ... , default 2
flag_affine_block_legacy, bool, whether use the legacy implementation
of the wavenet-based affine transformation layer
default False. The difference is on the WaveNet part
Please configure AffineCouplingWaveGlow and
AffineCouplingWaveGlow_legacy
This model defines:
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
z_1, z_2, ... are the extracted z from a multi-scale flow structure
concate([z_1, z_2, z]) is expected to be the white Gaussian noise
If early_hid_dim == 0, z_1 and z_2 will not be extracted
"""
super(WaveGlow, self).__init__()
# input is assumed to be waveform
self.m_input_dim = 1
self.m_early_hid_dim = early_hid_dim
# squeeze layer
self.m_squeeze = SqueezeForWaveGlow()
# up-sampling layer
#self.m_upsample = nii_nn.UpSampleLayer(cond_dim, upsample_rate, True)
self.m_upsample = upsampleByTransConv(cond_dim, upsample_rate)
# wavenet-based flow blocks
# squeezed input dimension
squeezed_in_dim = self.m_input_dim * self.m_squeeze.get_squeeze_factor()
# squeezed condition feature dimension
squeezed_cond_dim = cond_dim * self.m_squeeze.get_squeeze_factor()
# save the dimension for get_z_noises
self.m_feat_dim = []
# define blocks
tmp_squeezed_in_dim = squeezed_in_dim
tmp_flow_blocks = []
for i in range(num_blocks):
# if this is not the last block and early_hid_dim >0
flag_split = (i < (num_blocks-1)) and early_hid_dim > 0
flag_final_block = i == (num_blocks-1)
# save the dimension for get_z_noises
if flag_final_block:
self.m_feat_dim.append(tmp_squeezed_in_dim)
else:
self.m_feat_dim.append(early_hid_dim if flag_split else 0)
tmp_flow_blocks.append(
WaveGlowBlock(
tmp_squeezed_in_dim, squeezed_cond_dim, num_flows_inblock,
wn_num_conv1d, wn_dim_channel, wn_kernel_size, flag_affine,
flag_split = flag_split, flag_final_block=flag_final_block,
split_dim = early_hid_dim,
flag_affine_block_legacy = flag_affine_block_legacy))
# multi-scale approach will extract a few dimensions for next flow
# thus, input dimension to the next block will be this
tmp_squeezed_in_dim = tmp_squeezed_in_dim - early_hid_dim
self.m_flowblocks = torch_nn.ModuleList(tmp_flow_blocks)
# done
return
def _normal_lh(self, noise):
# likelihood of normal distribution on the given noise
return -0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
def forward(self, y, cond):
"""z, neg_logp_y, logp_z, logdet = WaveGlow.forward(y, cond)
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y -> squeeze -> WaveGlowBlock -> WGBlock ... WGBlock -> z
|-> z_1 |-> z_2
input
-----
y: tensor, (batch, waveform_length, 1)
cond: tensor, (batch, cond_length, cond_dim)
output
------
z: list of tensors, [z_1, z_2, ... , z] in the figure above
neg_logp_y: scalar, -log p(y)
logp_z: scalar, log N(z), summed over one data sequence and
averaged over the batch
logdet: scalar, log |det dH(.)/dy|, summed over one data sequence
and averaged over the batch
If self.early_hid_dim == 0, z_1, z_2 ... will be None
"""
# Rather than summing the likelihood and dividing it by the number of
# data points in the final step, we divide the likelihood computed by
# each flow step by this factor and sum the scaled likelihoods.
# The two methods are equivalent, but the latter may prevent numerical
# overflow of the likelihood value for long sentences
factor = np.prod([dim for dim in y.shape])
# waveform squeeze (batch, squeezed_length, squeezed_dim)
y_squeezed = self.m_squeeze(y)
squeezed_dim = y_squeezed.shape[-1]
# condition feature upsampling and squeeze
# (batch, squeezed_length, squeezed_dim_cond)
cond_up_squeezed = self.m_squeeze(self.m_upsample(cond))
# flows
z_bags = []
log_detjac = 0
log_pz = 0
x_tmp = y_squeezed
for m_block in self.m_flowblocks:
x_tmp, z_tmp, log_detjac_tmp = m_block(
x_tmp, cond_up_squeezed, factor)
# accumulate log det jacobian
log_detjac += log_detjac_tmp
# compute N(z; 0, I)
# save z_tmp (even if it is None)
z_bags.append(z_tmp)
# accumulate log_N(z; 0, I) only if it is valid
if z_tmp is not None:
log_pz += nii_glow.sum_over_keep_batch2(
self._normal_lh(z_tmp), factor)
# average over batch and data points
neg_logp_y = -(log_pz + log_detjac).sum()
return z_bags, neg_logp_y, \
log_pz.sum(), log_detjac.sum()
def reverse(self, z_bags, cond):
"""y = WaveGlow.reverse(z_bags, cond)
cond -> upsample/squeeze -> | ------> | --------> |
v v v
y <- unsqueeze <- WaveGlowBlock -> WGBlock ... WGBlock <- z
|<- z_1 |<- z_2
input
-----
z: list of tensors, [z_1, z_2, ... ,z ] in figure above
cond: tensor, (batch, cond_length, 1)
output
------
y: tensor, (batch, waveform_length, 1)
If self.early_hid_dim == 0, z_1, z_2 ... should be None
"""
# condition feature upsampling and squeeze
# (batch, squeezed_length, squeezed_dim_cond)
cond_up_sqe = self.m_squeeze(self.m_upsample(cond))
# initial
y_tmp = None
for z, m_block in zip(z_bags[::-1], self.m_flowblocks[::-1]):
y_tmp = m_block.reverse(y_tmp, z, cond_up_sqe)
y = self.m_squeeze.reverse(y_tmp)
return y
def get_z_noises(self, length, noise_std=0.7, batchsize=1):
"""z_bags = WaveGlow.get_z_noises(length, noise_std=0.7, batchsize=1)
Return a list of random noises for random sampling
input
-----
length: int, length of target waveform (without squeeze)
noise_std: float, std of Gaussian noise, default 0.7
batchsize: int, batch size of this random data, default 1
output
------
z_bags: list of tensors
Shape of tensor in z_bags is decided by WaveGlow configuration.
WaveGlow.reverse(z_bags, cond) can be used to generate waveform
"""
squeeze_length = self.m_squeeze.get_expected_squeeze_length(length)
device = next(self.parameters()).device
z_bags = []
# generate the z for each WaveGlowBlock
for feat_dim in self.m_feat_dim:
if feat_dim is not None and feat_dim > 0:
z_tmp = torch.randn(
[batchsize, squeeze_length, feat_dim],
dtype=nii_io_conf.d_dtype,
device=device)
z_bags.append(z_tmp * noise_std)
else:
z_bags.append(None)
return z_bags
if __name__ == "__main__":
print("Definition of WaveGlow")
| 42,401 | 35.711688 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/dynamic_prog.py | #!/usr/bin/env python
"""
Functions for dynamic programming
"""
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#############################################################
def viterbi_decode(init_prob, trans_prob, obser_prob,
eps=torch.finfo(torch.float32).eps, return_more=False):
""" Routine to do Viterbi decoding
viterbi_decode(init_prob, trans_prob, obser_prob,
eps=torch.finfo(torch.float32).eps, return_more=False):
Input:
init_prob: initial state probability
tensor or np.array, in shape (N), for N states
trans_prob: transition probability
tensor or np.array, in shape (N, N)
trans_prob(i, j): P(state=j | prev_state=i)
obser_prob: observation probability
tensor or np.array, in shape (T, N), for T time steps
return_more: True: return best_states, prob_mat, state_trace
False: return best_states
Output:
best_states: best state sequence tensor or np.array, in shape (T)
prob_mat: probability matrix in shape (T, N), where (t, j) denotes
max_{s_1:t-1} P(o_1:t, s_1:t-1, s_t=j)
state_mat: in shape (T, N), where (t, j) denotes
argmax_i P(o_1:t, s_1:t-2, s_t-1=i, s_t=j)
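Example (illustrative sketch, not part of the original code; the
probabilities below are arbitrary values for a 2-state, 3-step HMM):
init_prob = np.array([0.6, 0.4])
trans_prob = np.array([[0.7, 0.3], [0.4, 0.6]])
obser_prob = np.array([[0.1, 0.4], [0.6, 0.3], [0.3, 0.7]])
best_states = viterbi_decode(init_prob, trans_prob, obser_prob)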
"""
if type(init_prob) is torch.Tensor:
_log_func = torch.log
_torch_flag = True
else:
_log_func = np.log
_torch_flag = False
log_init_prob = _log_func(init_prob + eps)
log_trans_prob = _log_func(trans_prob + eps)
log_obser_prob = _log_func(obser_prob + eps)
n_time, n_state = log_obser_prob.shape
if log_trans_prob.shape[0] != n_state or log_trans_prob.shape[1] != n_state:
print("Viterbi decoding: transition prob matrix invalid")
sys.exit(1)
if log_init_prob.shape[0] != n_state:
print("Viterbi decoding: init prob matrix invalid")
sys.exit(1)
if _torch_flag:
prob_mat = torch.zeros_like(log_obser_prob)
state_mat = torch.zeros_like(log_obser_prob, dtype=torch.int)
best_states = torch.zeros([n_time], dtype=torch.int,
device = init_prob.device)
_argmax = torch.argmax
tmp_idx = torch.arange(0, n_state, dtype=torch.long)
else:
prob_mat = np.zeros(log_obser_prob.shape)
state_mat = np.zeros(log_obser_prob.shape, dtype=int)
best_states = np.zeros([n_time], dtype=int)
_argmax = np.argmax
tmp_idx = np.arange(0, n_state, dtype=int)
prob_mat[0, :] = log_init_prob + log_obser_prob[0, :]
for time_idx in np.arange(1, n_time):
trout_prob = prob_mat[time_idx - 1] + log_trans_prob.T
# this version is faster?
#print(time_idx)
tmp_best = _argmax(trout_prob, axis=1)
state_mat[time_idx] = tmp_best
prob_mat[time_idx] = trout_prob[tmp_idx, tmp_best] \
+ log_obser_prob[time_idx]
# seems to be too slow
#for state_idx in np.arange(n_state):
# tmp_best = _argmax(trout_prob[state_idx])
# state_mat[time_idx, state_idx] = tmp_best
# prob_mat[time_idx, state_idx] = trout_prob[state_idx, tmp_best] \
# +log_obser_prob[time_idx, state_idx]
best_states[-1] = _argmax(prob_mat[-1, :])
for time_idx in np.arange(n_time-2, -1, -1):
best_states[time_idx] = state_mat[time_idx+1, best_states[time_idx+1]]
if return_more:
return best_states, prob_mat, state_mat
else:
return best_states
| 3,957 | 36.695238 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/block_blow.py | #!/usr/bin/env python
"""
Building blocks for Blow
Serra, J., Pascual, S. & Segura, C. Blow: a single-scale hyperconditioned flow
for non-parallel raw-audio voice conversion. in Proc. NIPS (2019).
Reference: https://github.com/joansj/blow
"""
from __future__ import absolute_import
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import torch.nn.init as torch_init
import sandbox.block_glow as nii_glow
import core_scripts.data_io.wav_tools as nii_wav_tk
import core_scripts.data_io.conf as nii_io_conf
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#######################################
# Numpy utilities for data augmentation
#######################################
def flip(x):
"""y=flip(x) flips the sign of x
input: x, np.array
output: y, np.array
"""
return np.sign(np.random.rand(1)-0.5) * x
def ampscale(x):
"""y=ampscale(x) randomly scale the amplitude of x
input: x, np.array
output: y, np.array
"""
return (2*np.random.rand(1)-1) * x / (np.max(np.abs(x)) + 1e-07)
def framejitter(x, framelen):
"""y=framejitter(x, framelen)
input: x, np.array, original waveform (length, 1)
framelen, int, length of the segment to extract
output: y, np.array, segment of the waveform
"""
framelen = x.shape[0] if framelen > x.shape[0] else framelen
random_start = int(np.ceil(np.random.rand(1) * (x.shape[0] - framelen)))
return x[random_start:random_start+framelen]
def emphasis_rand(x, coef_val):
"""y=deemphasis(x, coef_val)
input: x, np.array, original waveform (length, 1) or (length)
framelen, int, framelen
output: y, np.array, segment of the waveform
"""
coef = (2 * np.random.rand(1) - 1) * coef_val
x_new = np.zeros_like(x) + x
x_new[1:] = x_new[1:] - coef * x[:-1]
return x_new
def wav_aug(x, framelen, coef_val, sr):
"""y = wav_aug(x, framelen, coef_val, sr)
input
-----
x: np.array, original waveform (length, 1)
framelen: int, frame length
coef_val: float, reference coefficient for emphasis-rand
sr: int, sampling rate (e.g., 16000)
output
------
y: np.array, pre-processed waveform (length, 1)
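Example (illustrative sketch, not part of the original code; the input
array and parameter values are assumptions, and the result depends on
the behavior of the external silence handler):
x = np.random.randn(32000, 1) * 0.1
y = wav_aug(x, 16000, 0.95, 16000)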
"""
trimmed_x = nii_wav_tk.silence_handler_wrapper(x, sr, flag_output=1)
x_frame = framejitter(trimmed_x, framelen)
return ampscale(emphasis_rand(x_frame, coef_val))
class OverlapAdder(torch_nn.Module):
"""OverlapAdder
"""
def __init__(self, fl, fs, flag_win_analysis=True):
"""OverlapAdder(flag_windowing_before=True)
Args
----
fl: int, frame length
fs: int, frame shift
flag_win_analysis: bool (default True)
True: apply windowing during analysis
False: apply windowing during synthesis
"""
super(OverlapAdder, self).__init__()
self.fl = fl
self.fs = fs
self.flag_win_ana = flag_win_analysis
# assume even
self.m_win = torch_nn.Parameter(torch.hann_window(self.fl))
return
def get_frame_num(self, wav_length):
"""frame_num = get_frame_num(wav_length)
wav_length: int, waveform length
frame_num: int, number of frames
"""
return (wav_length - self.fl) // self.fs + 1
def get_wavlength(self, frame_num):
"""wav_length = get_wavlength(self, frame_num)
wav_length: int, waveform length
frame_num: int, number of frames
"""
return (frame_num - 1) * self.fs + self.fl
def forward(self, x):
"""OverlapAdder(x)
input
-----
x: tensor, (batch, length, 1)
output
------
y: tensor, (batch, frame_num, frame_length)
"""
frame_num = self.get_frame_num(x.shape[1])
# (batch, num_patches, 1, patch_size)
# num_patches = (length - frame_length) // frame_shift + 1
# unfold copies the data and puts each patch in the last dimension
# x_tmp (batch, frame_num, 1, frame_length)
x_tmp = x.unfold(1, self.fl, self.fs)
# apply window
if self.flag_win_ana:
x_tmp = x_tmp * self.m_win
# (batch, frame_num, frame_length)
return x_tmp.view(x.shape[0], x_tmp.shape[1], -1)
def reverse(self, x_framed, flag_scale=False):
"""OverlapAdder(x)
input
-----
x: tensor, (batch, frame_num, frame_length)
flag_scale: bool, whether to scale the amplitude to (-1, 1)
default False
output
------
y: tensor, (batch, length, 1)
"""
batch, frame_num, frame_len = x_framed.shape
x_len = self.get_wavlength(frame_num)
x_buf = torch.zeros(
[batch, x_len], device=x_framed.device, dtype=x_framed.dtype)
x_win = torch.zeros_like(x_buf)
for idx in range(frame_num):
sdx = idx * self.fs
edx = sdx + self.fl
x_win[:, sdx:edx] += self.m_win
if not self.flag_win_ana:
x_buf[:, sdx:edx] += x_framed[:, idx] * self.m_win
else:
x_buf[:, sdx:edx] += x_framed[:, idx]
# assume the overlapped window has a constant amplitude
x_buf = x_buf / x_win.mean()
# normalize the amplitude between (-1, 1)
if flag_scale:
# if input is between (-1, 1), there is no need to
# do this normalization
x_buf = x_buf / (x_buf.abs().max())
return x_buf.unsqueeze(-1)
#######################################
# Torch model definition
#######################################
class AffineCouplingBlow_core(torch_nn.Module):
"""AffineCouplingBlow_core
AffineCoupling core layer the produces the scale and bias parameters.
Example:
feat_dim = 10
cond_dim = 20
m_layer = AffineCouplingBlow_core(feat_dim, cond_dim, 64, 2)
data = torch.randn([2, 100, feat_dim])
cond = torch.randn([2, 1, cond_dim])
scale, bias, log_scale = m_layer(data, cond)
"""
def __init__(self, feat_dim, cond_dim, num_ch, kernel_size=3):
"""AffineCouplingBlow_core(feat_dim, cond_dim, num_ch, kernel_size=3)
Args
----
feat_dim: int, dimension of input feature
cond_dim: int, dimension of conditional features
num_ch: int, number of channels for conv layers
kernel_size: int, kernel size of conv layer, default 3
input_feature -------> func.conv1d -----> conv1ds -> scale, bias
^
|
cond_dim ---> Adapter -> conv weight/bias
"""
super(AffineCouplingBlow_core, self).__init__()
self.feat_dim = feat_dim
self.cond_dim = cond_dim
# make sure that kernel is odd
if kernel_size % 2 == 0:
self.kernel_s = kernel_size + 1
print("\tAffineCouplingBlow_core", end=" ")
print("kernel size {:d} -> {:d}".format(kernel_size, self.kernel_s))
else:
self.kernel_s = kernel_size
if num_ch % feat_dim != 0:
# make sure that number of channel is good
self.num_ch = num_ch // feat_dim * feat_dim
print("\tAffineCouplingBlow_core", end=" ")
print("conv channel {:d} -> {:d}".format(num_ch, self.num_ch))
else:
self.num_ch = num_ch
# Adapter
# (batch, 1, cond_dim) -> (batch, 1, kernel_size * num_ch) for weight
# -> (batch, 1, num_ch) for bias
self.m_adapter = torch_nn.Linear(cond_dim,
(self.kernel_s+1) * self.num_ch)
# conv1d with condition-independent parameters
self.m_conv1ds = torch_nn.Sequential(
torch_nn.ReLU(),
torch_nn.Conv1d(self.num_ch, self.num_ch, 1),
torch_nn.ReLU(),
torch_nn.Conv1d(self.num_ch, feat_dim * 2, self.kernel_s,
padding=(self.kernel_s-1)//2)
)
# zero initialization for the last conv layers
# similar to Glow and WaveGlow
self.m_conv1ds[-1].weight.data.zero_()
self.m_conv1ds[-1].bias.data.zero_()
return
def forward(self, x, cond):
"""scale, bias = AffineCouplingBlow_core(x, cond)
input
-----
x: tensor, input tensor (batch, length, feat_dim)
cond: tensor, condition feature (batch, 1, cond_dim)
output
------
scale: tensor, scaling parameters (batch, length, feat_dim)
bias: tensor, bias paramerters (batch, length, feat_dim)
"""
# cond_dim -> Adapter -> conv weight/bias
# cond[:, 0, :] -> (batch, cond_dim)
# adapter(cond[:, 0, :]) -> (batch, kernel_size * num_ch + num_ch)
# view(...) -> (batch * num_ch, kernel_size + 1)
weight_bias = self.m_adapter(cond[:, 0, :]).view(-1, self.kernel_s+1)
# (batch * num_ch, 1, kernel_size)
weight = weight_bias[:, 0:self.kernel_s].unsqueeze(1)
# (batch * num_ch)
bias = weight_bias[:, self.kernel_s]
# convolution given weight_bias
padsize = (self.kernel_s - 1) // 2
groupsize = x.shape[0] * self.feat_dim
length = x.shape[1]
# x.permute(0, 2, 1)...view -> (1, batch*feat_dim, length)
# conv1d -> (1, batch * num_ch, length)
# view -> (batch, num_ch, length)
x_tmp = torch_nn_func.conv1d(
x.permute(0, 2, 1).contiguous().view(1, -1, length),
weight,
bias = bias,
padding = padsize,
groups = groupsize
).view(x.shape[0], -1, length)
# condition invariant conv -> (batch, feat_dim * 2, length)
x_tmp = self.m_conv1ds(x_tmp)
# scale and bias (batch, feat_dim, length)
raw_scale, bias = torch.chunk(x_tmp, 2, dim=1)
# -> (batch, length, feat_dim)
bias = bias.permute(0, 2, 1)
# re-parameterize so that scale stays in (0.5, 1.0);
# otherwise, log(scale) sometimes becomes -inf during training
scale = torch.sigmoid(raw_scale + 2).permute(0, 2, 1) * 0.5 + 0.5
log_scale = torch.log(scale)
#print("Debug: {:.3f} {:.3f} {:.3f} {:3f}".format(
# log_scale.max().item(), log_scale.min().item(),
# scale.max().item(), scale.min().item()),
# file=sys.stderr)
return scale, bias, log_scale
class AffineCouplingBlow(torch_nn.Module):
"""AffineCouplingBlow
AffineCoupling block in Blow
Example:
feat_dim = 10
cond_dim = 20
m_layer = AffineCouplingBlow(feat_dim, cond_dim,60,3, flag_detjac=True)
data = torch.randn([2, 100, feat_dim])
cond = torch.randn([2, 1, cond_dim])
out, detjac = m_layer(data, cond)
data_rever = m_layer.reverse(out, cond)
torch.std(data - data_rever)
"""
def __init__(self, in_dim, cond_dim,
conv_dim_channel, conv_kernel_size,
flag_detjac=False):
"""AffineCouplingBlow(in_dim, cond_dim,
wn_num_conv1d, wn_dim_channel, wn_kernel_size,
flag_affine=True, flag_detjac=False)
Args:
-----
in_dim: int, dim of input audio data (batch, length, in_dim)
cond_dim, int, dim of condition feature (batch, length, cond_dim)
conv_dim_channel: int, dime of the convolution channels
conv_kernel_size: int, kernel size of the convolution layers
flag_detjac: bool, whether return the determinant of Jacobian,
default False
y -> split() -> y1, y2 -> concate([y1, (y2+bias) * scale])
When flag_affine == True, y1 -> H() -> scale, bias
When flag_affine == False, y1 -> H() -> bias, scale=1
Here, H() is AffineCouplingBlow_core layer
"""
super(AffineCouplingBlow, self).__init__()
self.flag_detjac = flag_detjac
if in_dim % 2 > 0:
print("AffineCouplingBlow(feat_dim), feat_dim is an odd number?!")
sys.exit(1)
# Convolution block to get scale and bias
self.m_core = AffineCouplingBlow_core(
in_dim // 2, cond_dim, conv_dim_channel, conv_kernel_size)
return
def _detjac(self, log_scale, factor=1):
# (batch, dim1, dim2, ..., feat_dim) -> (batch)
# sum over dim1, ... feat_dim
return nii_glow.sum_over_keep_batch(log_scale / factor)
def _nn_trans(self, y1, cond):
"""_nn_trans(self, y1, cond)
input
-----
y1: tensor, input feature, (batch, length, input_dim//2)
cond: tensor, condition feature, (batch, length, cond_dim)
output
------
scale: tensor, (batch, length, input_dim // 2)
bias: tensor, (batch, length, input_dim // 2)
log_scale: tensor, (batch, length, input_dim // 2)
Affine transformation can be done by scale * feature + bias
log_scale is used for det Jacobian computation
"""
scale, bias, log_scale = self.m_core(y1, cond)
return scale, bias, log_scale
def forward(self, y, cond, factor=1):
"""AffineCouplingBlow.forward(y, cond)
input
-----
y: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, 1, cond_dim)
output
------
x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
y1, y2 = split(y)
scale, bias = Conv(y1)
x2 = y2 * scale + bias or (y2 + bias) * scale
return [y1, x2]
"""
# split
y1, y2 = y.chunk(2, -1)
scale, bias, log_scale = self._nn_trans(y1, cond)
# transform
x1 = y1
x2 = (y2 + bias) * scale
# concatenate
x = torch.cat([x1, x2], dim=-1)
if self.flag_detjac:
return x, self._detjac(log_scale, factor)
else:
return x
def reverse(self, x, cond):
"""AffineCouplingBlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, 1, cond_dim)
output
------
y: tensor, output feature, (batch, length, input_dim)
x1, x2 = split(x)
scale, bias = conv(x1)
y2 = x2 / scale - bias
return [x1, y2]
"""
# split
x1, x2 = x.chunk(2, -1)
# reverse transform
y1 = x1
scale, bias, log_scale = self._nn_trans(y1, cond)
y2 = x2 / scale - bias
return torch.cat([y1, y2], dim=-1)
class SqueezeForBlow(torch_nn.Module):
"""SqueezeForBlow
Squeeze input feature for Blow.
Example
data = torch.randn([2, 10, 3])
m_sq = SqueezeForBlow()
data_out = m_sq(data)
data_rev = m_sq.reverse(data_out)
torch.std(data_rev - data)
"""
def __init__(self, mode=1):
"""SqueezeForBlow(mode=1)
Args
----
mode: int, mode of squeeze, default 1
Mode 1: squeeze by a factor of 2 as in original paper
"""
super(SqueezeForBlow, self).__init__()
self.m_mode = mode
if self.m_mode == 1:
self.squeeze_factor = 2
else:
print("SqueezeForBlow mode {:d} not implemented".format(mode))
sys.exit(1)
return
def get_expected_squeeze_length(self, orig_length):
# return expected length after squeezing
if self.m_mode == 1:
return orig_length // self.squeeze_factor
else:
print("unknown mode for SqueezeForBlow")
sys.exit(1)
def get_recovered_length(self, squeezed_length):
# return original length before squeezing
if self.m_mode == 1:
return squeezed_length * self.squeeze_factor
else:
print("unknown mode for SqueezeForBlow")
sys.exit(1)
def get_squeeze_factor(self):
# return the configuration for squeezing
if self.m_mode == 1:
return self.squeeze_factor
else:
print("unknown mode for SqueezeForBlow")
sys.exit(1)
def forward(self, x):
"""SqueezeForBlow(x)
input
-----
x: tensor, (batch, length, feat_dim)
output
------
y: tensor, (batch, length//squeeze_factor, feat_dim*squeeze_factor)
"""
if self.m_mode == 1:
# squeeze, the 8 points should be the last dimension
squeeze_len = self.get_expected_squeeze_length(x.shape[1])
# trim length first
trim_len = squeeze_len * self.squeeze_factor
x_tmp = x[:, 0:trim_len, :]
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.view(x_tmp.shape[0], squeeze_len,
self.squeeze_factor, -1)
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time//squeeze_size, dim * squeeze_size)
return x_tmp.view(x_tmp.shape[0], squeeze_len, -1)
else:
print("SqueezeForWaveGlow not implemented")
sys.exit(1)
return x_squeezed
def reverse(self, x_squeezed):
if self.m_mode == 1:
# (batch, time//squeeze_size, dim * squeeze_size)
batch, squeeze_len, squeeze_dim = x_squeezed.shape
# (batch, time//squeeze_size, dim, squeeze_size)
x_tmp = x_squeezed.view(
batch, squeeze_len, squeeze_dim // self.squeeze_factor,
self.squeeze_factor)
# (batch, time//squeeze_size, squeeze_size, dim)
x_tmp = x_tmp.permute(0, 1, 3, 2).contiguous()
# (batch, time, dim)
x = x_tmp.view(batch, squeeze_len * self.squeeze_factor, -1)
else:
print("SqueezeForWaveGlow not implemented")
sys.exit(1)
return x
class FlowStepBlow(torch_nn.Module):
"""FlowStepBlow
One flow step for Blow
y -> invertible_1x1() -> ActNorm -> AffineCoupling -> x
Example
feat_dim = 10
cond_dim = 20
m_layer = FlowStepBlow(feat_dim, cond_dim, 60, 3)
data = torch.randn([2, 100, feat_dim])
cond = torch.randn([2, 1, cond_dim])
out, detjac = m_layer(data, cond)
data_rever = m_layer.reverse(out, cond)
torch.std(data - data_rever)
"""
def __init__(self, in_dim, cond_dim, conv_dim_channel, conv_kernel_size):
"""FlowStepBlow(in_dim, cond_dim,
conv_dim_channel, conv_kernel_size)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
conv_dim_channel: int, dim of the convolution layers
conv_kernel_size: int, kernel size of the convolution layers
For conv_dim_channel and conv_kernel_size, see AffineCouplingBlow
"""
super(FlowStepBlow, self).__init__()
# Invertible transformation layer
self.m_invtrans = nii_glow.InvertibleTrans(in_dim, flag_detjac=True)
# Act norm layer
self.m_actnorm = nii_glow.ActNorm(in_dim, flag_detjac=True)
# coupling layer
self.m_coupling = AffineCouplingBlow(
in_dim, cond_dim, conv_dim_channel, conv_kernel_size,
flag_detjac=True)
return
def forward(self, y, cond, factor=1):
"""FlowStepBlow.forward(y, cond, factor=1)
input
-----
y: tensor, input feature, (batch, length, in_dim)
cond: tensor, condition feature, (batch, 1, cond_dim)
factor: int, this is used to divide the likelihood, default 1.
If we directly sum all det-Jacobian terms, they can become very
large; however, we cannot simply average them over y because y
may have a different shape from the actual data
output
------
x: tensor, output feature, (batch, length, input_dim)
detjac: tensor, det of jacobian, (batch,)
"""
# 1x1 transform
x_tmp, log_det_1 = self.m_invtrans(y, factor)
# Actnorm
x_tmp, log_det_2 = self.m_actnorm(x_tmp, factor)
# coupling
x_tmp, log_det_3 = self.m_coupling(x_tmp, cond, factor)
return x_tmp, log_det_1 + log_det_2 + log_det_3
def reverse(self, x, cond):
"""FlowStepBlow.reverse(y, cond)
input
-----
x: tensor, input feature, (batch, length, input_dim)
cond: tensor, condition feature, (batch, 1, cond_dim)
output
------
y: tensor, output feature, (batch, length, input_dim)
"""
y_tmp1 = self.m_coupling.reverse(x, cond)
y_tmp2 = self.m_actnorm.reverse(y_tmp1)
y_tmp3 = self.m_invtrans.reverse(y_tmp2)
#print("Debug: {:.3f} {:.3f} {:.3f} {:.3f} {:.3f} {:.3f}".format(
# y_tmp1.max().item(), y_tmp1.min().item(),
# y_tmp2.max().item(), y_tmp2.min().item(),
# y_tmp3.max().item(), y_tmp3.min().item()))
return y_tmp3
class BlowBlock(torch_nn.Module):
"""BlowBlock
A BlowBlock includes multiple steps of flow for Blow.
Each block conducts:
x -> squeeze -> flow step1 -> ... -> flow step N
Compared with WaveGlowBlock, this is easier because there is no
multi-scale structure, no need to split the latent z.
Example:
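(illustrative sketch, not part of the original code; the dims below
are assumptions chosen only to show the expected shapes)
data = torch.randn([2, 100, 1])
cond = torch.randn([2, 1, 20])
m_block = BlowBlock(1, 20, 2, 64, 3)
z, log_detjac = m_block(data, cond)   # z: (2, 50, 2)
data_re = m_block.reverse(z, cond)    # (2, 100, 1)
torch.std(data - data_re)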
"""
def __init__(self, in_dim, cond_dim, n_flow_steps,
conv_dim_channel, conv_kernel_size):
"""BlowBlock(in_dim, cond_dim, n_flow_steps,
conv_dim_channel, conv_kernel_size)
Args
----
in_dim: int, input feature dim, (batch, length, in_dim)
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
n_flow_steps: int, number of flow steps in one block
conv_dim_channel: int, dim of the conv residual and skip channels
conv_kernel_size: int, kernel size of the convolution layers
For conv_dim_channel and conv_kernel_size, see AffineCouplingBlow
"""
super(BlowBlock, self).__init__()
# squeeze
self.m_squeeze = SqueezeForBlow()
squeezed_feat_dim = in_dim * self.m_squeeze.get_squeeze_factor()
# flow steps
tmp_flows = []
for i in range(n_flow_steps):
tmp_flows.append(
FlowStepBlow(
squeezed_feat_dim, cond_dim,
conv_dim_channel, conv_kernel_size))
self.m_flows = torch_nn.ModuleList(tmp_flows)
self.m_out_dim = squeezed_feat_dim
return
def get_out_feat_dim(self):
return self.m_out_dim
def get_expected_squeeze_length(self, orig_length):
return self.m_squeeze.get_expected_squeeze_length(orig_length)
def forward(self, y, cond, factor=1):
"""z, log_detjac = BlowBlock(y)
y -> squeeze -> H() -> z, log_det_jacobian
H() consists of multiple flow steps (1x1conv + Actnorm + AffineCoupling)
input
-----
y: tensor, (batch, length, dim)
cond, tensor, (batch, 1, cond_dim)
factor, None or int, this is used to divde likelihood, default 1
output
------
log_detjac: tensor or scalar
z: tensor, (batch, length, dim), for N(z; 0, I) or next flow block
"""
# squeeze
x_tmp = self.m_squeeze(y)
# flows
log_detjac = 0
for idx, l_flow in enumerate(self.m_flows):
x_tmp, log_detjac_tmp = l_flow(x_tmp, cond, factor)
log_detjac = log_detjac + log_detjac_tmp
return x_tmp, log_detjac
def reverse(self, z, cond):
"""y = BlowBlock.reverse(z, cond)
z -> H^{-1}() -> unsqueeze -> y
input
-----
z: tensor, (batch, length, in_dim)
cond, tensor, (batch, 1, cond_dim)
output
------
y: tensor, (batch, length, in_dim)
"""
y_tmp = z
for l_flow in self.m_flows[::-1]:
y_tmp = l_flow.reverse(y_tmp, cond)
y = self.m_squeeze.reverse(y_tmp)
return y
class Blow(torch_nn.Module):
"""Blow
"""
def __init__(self, cond_dim, num_blocks, num_flows_inblock,
conv_dim_channel, conv_kernel_size):
"""Blow(cond_dim, num_blocks, num_flows_inblock,
conv_dim_channel, conv_kernel_size)
Args
----
cond_dim:, int, conditional feature dim, (batch, length, cond_dim)
num_blocks: int, number of WaveGlowBlocks
num_flows_inblock: int, number of flow steps in one WaveGlowBlock
conv_dim_channel: int, dim of convolution layers channels
conv_kernel_size: int, kernel size of the convolution layers
This model defines:
cond (global) ----- -> | ------> | --------> |
v v v
y --------------> BlowBlock1 -> BlowBlock2 -> ... -> z
"""
super(Blow, self).__init__()
# input is assumed to be waveform
self.m_input_dim = 1
# save the dimension for get_z_noises
self.m_z_dim = 0
# define blocks
tmp_squeezed_in_dim = self.m_input_dim
tmp_flow_blocks = []
for i in range(num_blocks):
tmp_flow_blocks.append(
BlowBlock(
tmp_squeezed_in_dim, cond_dim, num_flows_inblock,
conv_dim_channel, conv_kernel_size))
tmp_squeezed_in_dim = tmp_flow_blocks[-1].get_out_feat_dim()
self.m_z_dim = tmp_squeezed_in_dim
self.m_flowblocks = torch_nn.ModuleList(tmp_flow_blocks)
# done
return
def get_expected_squeeze_length(self, wave_length):
"""length = get_expected_squeeze_length(self, wave_length)
Return expected length of latent z
input
-----
wave_length: int, length of original waveform
output
------
length: int, length of latent z
"""
length = wave_length
for glowblock in self.m_flowblocks:
length = glowblock.get_expected_squeeze_length(length)
return length
def _normal_lh(self, noise):
# likelihood of normal distribution on the given noise
return -0.5 * np.log(2 * np.pi) - 0.5 * noise ** 2
def forward(self, y, cond):
"""z, neg_logp_y, logp_z, logdet = Blow.forward(y, cond)
cond (global) ----- -> | ------> | --------> |
v v v
y --------------> BlowBlock1 -> BlowBlock2 -> ... -> z
input
-----
y: tensor, (batch, waveform_length, 1)
cond: tensor, (batch, 1, cond_dim)
output
------
z: tensor
neg_logp_y: scalar, - log p(y)
logp_z: scalar, log N(z), summed over one data sequence and
averaged over the batch
logdet: scalar, log |det dH(.)/dy|, summed over one data sequence
and averaged over the batch
"""
# Rather than summing the likelihood and dividing it by the number of
# data points in the final step, we divide the likelihood computed by
# each flow step by this factor and sum the scaled likelihoods.
# The two methods are equivalent, but the latter may prevent numerical
# overflow of the likelihood value for long sentences
factor = np.prod([dim for dim in y.shape])
# flows
log_detjac = 0
log_pz = 0
x_tmp = y
for m_block in self.m_flowblocks:
x_tmp, log_detjac_tmp = m_block(
x_tmp, cond, factor)
# accumulate log det jacobian
log_detjac += log_detjac_tmp
z_tmp = x_tmp
# compute N(z; 0, I)
# accumulate log_N(z; 0, I) only if it is valid
if z_tmp is not None:
log_pz += nii_glow.sum_over_keep_batch2(
self._normal_lh(z_tmp), factor)
# average over batch and data points
neg_logp_y = -(log_pz + log_detjac).sum()
return z_tmp, neg_logp_y, \
log_pz.sum(), log_detjac.sum()
def reverse(self, z, cond):
"""y = Blow.reverse(z_bags, cond)
cond (global) ----- -> | ------> | --------> |
v v v
y <--------------- BlowBlock1 <- BlowBlock2 <- ... <- z
input
-----
z: tensor, shape decided by the model configuration
cond: tensor, (batch, 1, cond_dim)
output
------
y: tensor, (batch, waveform_length, 1)
"""
# initial
y_tmp = z
for m_block in self.m_flowblocks[::-1]:
y_tmp = m_block.reverse(y_tmp, cond)
return y_tmp
def get_z_noises(self, length, noise_std=0.7, batchsize=1):
"""z_bags = Blow.get_z_noises(length, noise_std=0.7, batchsize=1)
Return random noise for random sampling
input
-----
length: int, length of target waveform (without squeeze)
noise_std: float, std of Gaussian noise, default 0.7
batchsize: int, batch size of this random data, default 1
output
------
z: tensor, shape decided by the network
Blow.reverse(z, cond) can be used to generate waveform
"""
squeeze_length = self.get_expected_squeeze_length(length)
device = next(self.parameters()).device
z_tmp = torch.randn(
[batchsize, squeeze_length, self.m_z_dim],
dtype=nii_io_conf.d_dtype,
device=device)
return z_tmp
if __name__ == "__main__":
print("Definition of Blow")
| 31,665 | 32.367756 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/util_bayesian.py | #!/usr/bin/env python
"""
util_bayesian.py
Utilities for bayeisan neural network
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
######
# utils to save guide and model Pyro
# not used anymore (note: these functions would also require
# `import pyro`, which is not imported in this file)
######
def save_model_guide(model, guide, path_model, path_guide):
#torch.save(dnn_net.state_dict(), "mnist_cnn_{:03d}.pt".format(idx))
torch.save({"model" : model.state_dict(),
"guide" : guide}, path_model)
pyro.get_param_store().save(path_guide)
return
def load_model_guide(model, path_model, path_guide):
pretrained = torch.load(path_model)
model.load_state_dict(pretrained['model'])
guide = pretrained['guide']
pyro.get_param_store().load(path_guide)
return guide
######
# Utils to compute metrics for Bayesian inference
######
def _xent(y, dim=-1, log_floor = 0.000001):
"""xe = xent(y, dim)
input: y, tensor, (..., num_classes), probablity matrix
input: dim, int, along which dimension we do xent? default -1
output: xe, tensor, (..., 1), xe = -sum_j y[j] log y[j]
"""
logfloor = torch.zeros_like(y)
logfloor[y < log_floor] = log_floor
return -torch.sum(y * torch.log(y + logfloor), dim=dim, keepdim=True)
def xent(p):
"""mi = xent(p)
This measures total uncertainty
input: p, tensor, (sammple_N, batch, num_classes), probablity
output: xe, tensor, (batch, 1)
"""
# step1. Bayesian model average p(y | x, D) = E_{q_w}[p(y | w, x)]
# -> 1/N sum_i p(y | w_i, x)
# mp (batch, num_classes)
mp = p.mean(dim=0)
# step2. cross entropy over p(y | x, D)
# xe (batch, 1)
xe = _xent(mp)
return xe
def compute_epstemic_uncertainty(y):
"""mi = mutual_infor(y)
This measures epstemic uncertainty
input: y, tensor, (sammple_N, batch, num_classes), probablity
output: mi, tensor, (batch, 1)
"""
# cross entropy over BMA prob, see xent() above
xe = xent(y)
# cross entropy over each individual sample, ve (sample_N, batch, 1)
# for w_i, compute ent_i = xent(p(y | w_i, x))
# then, ve = 1/N sum_i ent_i
ve = torch.mean(_xent(y), dim=0)
# xe - ve
mi = xe - ve
return mi
def compute_aleatoric_uncertainty(y):
"""mi = mutual_infor(y)
This measures aleatoric uncertainty
input: y, tensor, (sammple_N, batch, num_classes), probablity
output: mi, tensor, (batch, 1)
"""
ve = torch.mean(_xent(y), dim=0)
return ve
def compute_logit_from_prob(y, log_floor=0.0000001):
"""logit = compute_logit_from_prob(y)
input: y, tensor, any shape, probability of being positive
output: logit, tensor, same shape as y, sigmoid(logit) is y
"""
logfloor = torch.zeros_like(y)
logfloor[y < log_floor] = log_floor
tmp = 1 / (y + logfloor) - 1
logfloor = logfloor * 0
logfloor[tmp < log_floor] = log_floor
logit = - torch.log(tmp + logfloor)
return logit
#####
# wrapper
#####
def compute_llr_eps_ale(logits, idx_pos=1):
"""llr, eps, ale = compute_llr_eps_ale(logits)
input: logits, tensor (sampling_num, batch, 2)
idx_pos, int, which dimension is the positive class?
(default 1, which means logits[:, :, 1])
output: llr, tensor, (batch, 1)
eps, tensor, (batch, 1)
ale, tensor, (batch, 1)
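Example (illustrative sketch, not part of the original code):
logits = torch.randn([10, 4, 2])   # 10 MC samples, batch of 4, 2 classes
llr, eps, ale = compute_llr_eps_ale(logits)
# llr, eps, ale all have shape (4, 1)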
"""
# -> (sampling_num, batch, 2)
prob = torch_nn_func.softmax(logits, dim=-1)
# to LLR
# 1. average prob over the samples to (batch, num_class)
# 2. compute the llr
averaged_prob = torch.mean(prob, dim=0)
# unsqueeze to make the shape consistent
llr = compute_logit_from_prob(averaged_prob[..., idx_pos]).unsqueeze(-1)
# get uncertainty
eps = compute_epstemic_uncertainty(prob)
ale = compute_aleatoric_uncertainty(prob)
return llr, eps, ale
if __name__ == "__main__":
print("Package for util_bayesian")
| 4,124 | 26.684564 | 76 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/util_dsp.py | #!/usr/bin/env python
"""
util_dsp.py
Utilities for signal processing
MuLaw Code adapted from
https://github.com/fatchord/WaveRNN/blob/master/utils/distribution.py
DCT code adapted from
https://github.com/zh217/torch-dct
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020-2021, Xin Wang"
######################
### WaveForm utilities
######################
def label_2_float(x, bits):
"""output = label_2_float(x, bits)
Assume x is code index for N-bits, then convert x to float values
Note: dtype conversion is not handled
inputs:
-----
x: data to be converted Tensor.long or int, any shape.
x value should be [0, 2**bits-1]
bits: number of bits, int
Return:
-------
output: tensor.float, [-1, 1]
output = 2 * x / (2**bits - 1.) - 1.
"""
return 2 * x / (2**bits - 1.) - 1.
def float_2_label(x, bits):
"""output = float_2_label(x, bits)
Assume x is a float value, do N-bits quantization and
return the code index.
input
-----
x: data to be converted, any shape
x value should be [-1, 1]
bits: number of bits, int
output
------
output: tensor.float, [0, 2**bits-1]
Although output is quantized, we use torch.float to save
the quantized values
"""
#assert abs(x).max() <= 1.0
# scale the peaks
peak = torch.abs(x).max()
if peak > 1.0:
x /= peak
# quantize
x = (x + 1.) * (2**bits - 1) / 2
return torch.clamp(x, 0, 2**bits - 1)
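# Minimal sketch of the label/float round trip above (the 10-bit setting and
# the toy input are illustrative assumptions, not package defaults).
def _demo_label_float_roundtrip(bits=10):
    x = torch.linspace(-1.0, 1.0, 5)
    codes = float_2_label(x, bits)       # float-typed codes in [0, 2**bits-1]
    x_hat = label_2_float(codes, bits)   # back to [-1, 1]
    # codes are not rounded by float_2_label, so this round trip is exact
    return torch.max(torch.abs(x - x_hat))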
def mulaw_encode(x, quantization_channels, scale_to_int=True):
"""x_mu = mulaw_encode(x, quantization_channels, scale_to_int=True)
Adapted from torchaudio
https://pytorch.org/audio/functional.html mu_law_encoding
input
-----
x (Tensor): Input tensor, float-valued waveforms in (-1, 1)
quantization_channels (int): Number of channels
scale_to_int: Bool
True: scale mu-law to int
False: return mu-law in (-1, 1)
output
------
x_mu: tensor, int64, Input after mu-law encoding
"""
# mu
mu = quantization_channels - 1.0
# no check on the value of x
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype, device=x.device)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
if scale_to_int:
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mulaw_decode(x_mu, quantization_channels, input_int=True):
"""Adapted from torchaudio
https://pytorch.org/audio/functional.html mu_law_encoding
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
input_int: Bool
True: convert x_mu (int) from int to float, before mu-law decode
False: directly decode x_mu (float)
Returns:
Tensor: Input after mu-law decoding (float-value waveform (-1, 1))
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype, device=x_mu.device)
if input_int:
x = ((x_mu) / mu) * 2 - 1.0
else:
x = x_mu
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
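# Minimal sketch of the mu-law companding round trip defined above
# (illustrative assumptions: 256 channels and a toy sine input).
def _demo_mulaw_roundtrip(quantization_channels=256):
    x = 0.5 * torch.sin(torch.linspace(0.0, 6.283, 200))
    codes = mulaw_encode(x, quantization_channels)      # int64 in [0, 255]
    x_hat = mulaw_decode(codes, quantization_channels)  # float in (-1, 1)
    # the quantization error should stay small for 8-bit mu-law
    return torch.max(torch.abs(x - x_hat))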
######################
### DCT utilities
### https://github.com/zh217/torch-dct
### LICENSE: MIT
###
######################
def rfft_wrapper(x, onesided=True, inverse=False):
    # compatibility with torch fft API
if hasattr(torch, "rfft"):
# for torch < 1.8.0, rfft is the API to use
# torch 1.7.0 complains about this API, but it is OK to use
if not inverse:
# FFT
return torch.rfft(x, 1, onesided=onesided)
else:
# inverse FFT
return torch.irfft(x, 1, onesided=onesided)
else:
# for torch > 1.8.0, fft.rfft is the API to use
if not inverse:
# FFT
if onesided:
data = torch.fft.rfft(x)
else:
data = torch.fft.fft(x)
return torch.stack([data.real, data.imag], dim=-1)
else:
            # inverse FFT: rebuild a complex tensor from the stacked
            # (real, imag) pair in the last dimension
            real_image = torch.chunk(x, 2, dim=-1)
            x = torch.complex(real_image[0].squeeze(-1),
                              real_image[1].squeeze(-1))
            if onesided:
                return torch.fft.irfft(x)
            else:
                # keep the real part, matching the old torch.irfft behavior
                return torch.fft.ifft(x).real
def dct1(x):
"""
Discrete Cosine Transform, Type I
:param x: the input signal
:return: the DCT-I of the signal over the last dimension
"""
x_shape = x.shape
x = x.view(-1, x_shape[-1])
return rfft_wrapper(
torch.cat([x, x.flip([1])[:, 1:-1]], dim=1))[:, :, 0].view(*x_shape)
def idct1(X):
"""
The inverse of DCT-I, which is just a scaled DCT-I
    Our definition of idct1 is such that idct1(dct1(x)) == x
:param X: the input signal
:return: the inverse DCT-I of the signal over the last dimension
"""
n = X.shape[-1]
return dct1(X) / (2 * (n - 1))
def dct(x, norm=None):
"""
Discrete Cosine Transform, Type II (a.k.a. the DCT)
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/ scipy.fftpack.dct.html
:param x: the input signal
:param norm: the normalization, None or 'ortho'
:return: the DCT-II of the signal over the last dimension
"""
x_shape = x.shape
N = x_shape[-1]
x = x.contiguous().view(-1, N)
v = torch.cat([x[:, ::2], x[:, 1::2].flip([1])], dim=1)
Vc = rfft_wrapper(v, onesided=False)
k = - torch.arange(N, dtype=x.dtype, device=x.device)[None, :] * np.pi/(2*N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V = Vc[:, :, 0] * W_r - Vc[:, :, 1] * W_i
if norm == 'ortho':
V[:, 0] /= np.sqrt(N) * 2
V[:, 1:] /= np.sqrt(N / 2) * 2
V = 2 * V.view(*x_shape)
return V
def idct(X, norm=None):
"""
The inverse to DCT-II, which is a scaled Discrete Cosine Transform, Type III
Our definition of idct is that idct(dct(x)) == x
For the meaning of the parameter `norm`, see:
https://docs.scipy.org/doc/ scipy.fftpack.dct.html
:param X: the input signal
:param norm: the normalization, None or 'ortho'
:return: the inverse DCT-II of the signal over the last dimension
"""
x_shape = X.shape
N = x_shape[-1]
X_v = X.contiguous().view(-1, x_shape[-1]) / 2
if norm == 'ortho':
X_v[:, 0] *= np.sqrt(N) * 2
X_v[:, 1:] *= np.sqrt(N / 2) * 2
k = torch.arange(x_shape[-1], dtype=X.dtype,
device=X.device)[None, :]*np.pi/(2*N)
W_r = torch.cos(k)
W_i = torch.sin(k)
V_t_r = X_v
V_t_i = torch.cat([X_v[:, :1] * 0, -X_v.flip([1])[:, :-1]], dim=1)
V_r = V_t_r * W_r - V_t_i * W_i
V_i = V_t_r * W_i + V_t_i * W_r
V = torch.cat([V_r.unsqueeze(2), V_i.unsqueeze(2)], dim=2)
v = rfft_wrapper(V, onesided=False, inverse=True)
x = v.new_zeros(v.shape)
x[:, ::2] += v[:, :N - (N // 2)]
x[:, 1::2] += v.flip([1])[:, :N // 2]
return x.view(*x_shape)
class LinearDCT(torch_nn.Linear):
"""DCT implementation as linear transformation
Original Doc is in:
https://github.com/zh217/torch-dct/blob/master/torch_dct/_dct.py
This class implements DCT as a linear transformation layer.
This layer's weight matrix is initialized using the DCT transformation mat.
Accordingly, this API assumes that the input signal has a fixed length.
Please pad or trim the input signal when using this LinearDCT.forward(x)
Args:
----
in_features: int, which is equal to expected length of the signal.
type: string, dct1, idct1, dct, or idct
norm: string, ortho or None, default None
    bias: bool, whether to add a bias to this linear layer. Default False
"""
def __init__(self, in_features, type, norm=None, bias=False):
self.type = type
self.N = in_features
self.norm = norm
super(LinearDCT, self).__init__(in_features, in_features, bias=bias)
def reset_parameters(self):
# initialise using dct function
I = torch.eye(self.N)
if self.type == 'dct1':
self.weight.data = dct1(I).data.t()
elif self.type == 'idct1':
self.weight.data = idct1(I).data.t()
elif self.type == 'dct':
self.weight.data = dct(I, norm=self.norm).data.t()
elif self.type == 'idct':
self.weight.data = idct(I, norm=self.norm).data.t()
self.weight.requires_grad = False # don't learn this!
if __name__ == "__main__":
print("util_dsp.py")
| 9,027 | 27.935897 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/util_music.py | #!/usr/bin/env python
"""
util_music.py
Utilities for music applications
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.dynamic_prog as nii_dy
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
class HzCentConverter(torch_nn.Module):
"""
    HzCentConverter: an interface to convert F0 to cent, probability matrix
and do reverse conversions
"""
def __init__(self,
u_hz = 0,
hz_ref = 10,
base_hz = 31.77219916398751,
top_hz = 2033.4207464952,
bins = 360,
cent_1st = 32.70319566257483,
cent_last = 1975.5332050244956,
):
super(HzCentConverter, self).__init__()
# unvoiced F0
self.m_v_hz = u_hz
# reference for cent calculation
self.m_hz_ref = hz_ref
# quantized resolution
# originally, bins = 360 -> 12 * 6 * 5, 12 semitones, 6 octaves
        # each semitone is further divided into 5 intervals
self.m_fine_interval = 5
#####
# for quantization
#####
# one semitone cover 100 cents
# thus, -50 on bottom, +50 on top
# cent2hz(hz2cent(librosa.note_to_hz('C1'))-50)
self.m_base_hz = torch.tensor([base_hz])
# cent2hz(hz2cent(librosa.note_to_hz('B6'))+50)
self.m_top_hz = torch.tensor([top_hz])
# quantization interval
self.m_bins = bins
self.m_base_cent = self.hz2cent(self.m_base_hz)
self.m_top_cent = self.hz2cent(self.m_top_hz)
#####
# for de-quantization
#####
# librosa.note_to_hz('C1')
self.m_1st_cent = self.hz2cent(torch.tensor([cent_1st]))
# librosa.note_to_hz('B6')
self.m_last_cent = self.hz2cent(torch.tensor([cent_last]))
# quantized cent per bin
self.m_quan_cent_dis = (self.m_last_cent - self.m_1st_cent)/self.m_bins
        # quantized cents as a tensor
self.m_dis_cent = torch_nn.Parameter(
torch.linspace(self.m_1st_cent.numpy()[0],
self.m_last_cent.numpy()[0],
self.m_bins),
requires_grad=False)
# quantized F0 as a tensor
self.m_dis_f0 = self.cent2hz(
torch.linspace(self.m_1st_cent.numpy()[0],
self.m_last_cent.numpy()[0],
self.m_bins))
#####
# for viterbi decoding
#####
self.m_viterbi_decode = True
        # initial state probability
self.m_viterbi_init = np.ones(self.m_bins * 2) / (self.m_bins * 2)
# transition probability
def _trans_mat():
max_cent = 12
p_vv = 0.99
p_uv = 1 - p_vv
# transition probabilities inducing continuous pitch
xx, yy = np.meshgrid(range(self.m_bins), range(self.m_bins))
tran_m_v = np.maximum(max_cent - abs(xx - yy), 0)
tran_m_v = tran_m_v / np.sum(tran_m_v, axis=1)[:, None]
# unvoiced part
tran_m_u = np.ones([self.m_bins, self.m_bins])/self.m_bins
tmp1 = np.concatenate([tran_m_v * p_vv, tran_m_u * p_uv], axis=1)
tmp2 = np.concatenate([tran_m_v * p_uv, tran_m_u * p_vv], axis=1)
trans = np.concatenate([tmp1, tmp2], axis=0)
return trans
self.m_viterbi_tran = _trans_mat()
def hz2cent(self, hz):
"""
hz2cent(self, hz)
Convert F0 Hz in to Cent
Parameters
----------
hz: torch.tensor
Return
------
: torch.tensor
"""
return 1200 * torch.log2(hz/self.m_hz_ref)
def cent2hz(self, cent):
return torch.pow(2, cent/1200) * self.m_hz_ref
def quantize_hz(self, hz):
cent = self.hz2cent(hz)
q_bin = torch.round((cent - self.m_base_cent) * self.m_bins /\
(self.m_top_cent - self.m_base_cent))
        # clamp to the valid bin range, then shift by 1
        q_bin = torch.clamp(q_bin, 0, self.m_bins - 1) + 1
return q_bin
def dequantize_hz(self, quantized_cent):
cent = quantized_cent * self.m_quan_cent_dis + self.m_1st_cent
return self.cent2hz(cent)
def f0_to_mat(self, f0_seq, var=625):
"""
f0_to_mat(self, f0_seq)
Convert F0 sequence (hz) into a probability matrix.
Jong Wook Kim, Justin Salamon, Peter Li, and Juan Pablo Bello. 2018.
CREPE: A Convolutional Representation for Pitch Estimation.
In Proc. ICASSP, 161-165
Parameters
----------
f0_seq: torch.tensor (1, N, 1)
Return
------
target_mat: torch.tensor (1, N, bins)
created probability matrix for f0
"""
if f0_seq.dim() != 3:
print("f0 sequence loaded in tensor should be in shape (1, N, 1)")
sys.exit(1)
        # voiced / unvoiced index
v_idx = f0_seq > self.m_v_hz
u_idx = ~v_idx
# convert F0 Hz to cent
target = torch.zeros_like(f0_seq)
target[v_idx] = self.hz2cent(f0_seq[v_idx])
target[u_idx] = 0
# target
# since target is (1, N, 1), the last dimension size is 1
# self.m_dis_cent (bins) -> propagated to (1, N, bins)
target_mat = torch.exp(-torch.pow(self.m_dis_cent - target, 2)/2/var)
# set unvoiced to zero
for idx in range(target_mat.shape[0]):
target_mat[idx, u_idx[idx, :, 0], :] *= 0.0
#target_mat[0, u_idx[0, :, 0], :] *= 0.0
# return
return target_mat
def recover_f0(self, bin_mat, viterbi_decode=True):
"""
recover_f0(self, bin_mat)
Produce F0 from a probability matrix.
This is the inverse function of f0_to_mat.
By default, use Viterbi decoding to produce F0.
Matthias Mauch, and Simon Dixon. 2014.
PYIN: A Fundamental Frequency Estimator Using Probabilistic
Threshold Distributions. In Proc. ICASSP, 659-663.
Parameters
----------
bin_mat: torch.tensor (1, N, bins)
Return
------
f0: torch.tensor(1, N, 1)
"""
# check
if bin_mat.shape[0] != 1:
print("F0 generation only support batchsize=1")
sys.exit(1)
if bin_mat.dim() != 3 or bin_mat.shape[-1] != self.m_bins:
print("bin_mat should be in shape (1, N, bins)")
sys.exit(1)
# generation
if not self.m_viterbi_decode or not viterbi_decode:
# denominator
prob_sum = torch.sum(bin_mat, axis=2)
# add floor
prob_sum[prob_sum < 1e-07] = 1e-07
# normal sum
cent = torch.sum(bin_mat * self.m_dis_cent, axis=2) / prob_sum
f0 = self.cent2hz(cent)
f0[cent < 1] = 0
return f0.unsqueeze(-1)
else:
tmp_bin_mat = bin_mat.to('cpu')
# viterbi decode:
with torch.no_grad():
                # observation probability for unvoiced states
prob_u = torch.ones_like(tmp_bin_mat) \
- torch.mean(tmp_bin_mat, axis=2, keepdim=True)
# concatenate to observation probability matrix
# [Timestep, m_bins * 2],
# m_bins is the number of quantized F0 bins
# another m_bins is for the unvoiced states
tmp_bin_mat = torch.cat([tmp_bin_mat, prob_u],axis=2).squeeze(0)
# viterbi decoding. Numpy is fast?
tmp_bin_mat = tmp_bin_mat.numpy()
quantized_cent = nii_dy.viterbi_decode(
self.m_viterbi_init, self.m_viterbi_tran, tmp_bin_mat * 0.5)
# unvoiced state sequence (states in [m_bins, m_bins*2])
u_idx = quantized_cent>=self.m_bins
# based on viterbi best state, do weighted sum over a beam
# Equation from
# https://github.com/marl/crepe/blob/master/crepe/core.py#L108
prob_m = torch.zeros_like(bin_mat)
for idx, i in enumerate(quantized_cent):
s_idx = np.max([i - 4, 0])
e_idx = np.min([i+5, self.m_bins])
prob_m[0, idx, s_idx:e_idx] = bin_mat[0, idx, s_idx:e_idx]
cent = torch.sum(prob_m * self.m_dis_cent, axis=2) / \
torch.sum(prob_m, axis=2)
# from cent to f0
f0 = self.cent2hz(cent)
# unvoiced
f0[0, u_idx]=0
return f0.unsqueeze(-1)
def f0_probmat_postprocessing(self, f0_prob_mat):
"""
f0_prob_mat = f0_prob_mat_post(f0_prob_mat)
input
-----
        f0_prob_mat: torch tensor of shape (batchsize, length, bins)
output
------
f0_prob_mat_new: same shape as f0_prob_mat
"""
if f0_prob_mat.shape[-1] != self.m_bins:
print("Last dimension of F0 prob mat != {:d}".format(self.m_bins))
sys.exit(1)
if f0_prob_mat.shape[0] > 1:
print("Cannot support batchsize > 1 for dynamic programming")
sys.exit(1)
        # observation probability for unvoiced states
prob_u = torch.ones_like(f0_prob_mat) \
- torch.mean(f0_prob_mat, axis=2, keepdim=True)
tmp_bin_mat = torch.cat([f0_prob_mat, prob_u],axis=2).squeeze(0)
# viterbi decoding. Numpy is fast?
tmp_bin_mat = tmp_bin_mat.to('cpu').numpy()
quantized_cent = nii_dy.viterbi_decode(
self.m_viterbi_init, self.m_viterbi_tran, tmp_bin_mat * 0.5)
u_idx = quantized_cent>=self.m_bins
mat_new = torch.zeros_like(f0_prob_mat)
for idx, i in enumerate(quantized_cent):
if i < self.m_bins:
sidx = np.max([i - 4, 0])
eidx = np.min([i+5, self.m_bins])
mat_new[0, idx, sidx:eidx] = f0_prob_mat[0,idx,sidx:eidx]
mat_new[0, idx, sidx:eidx] /= mat_new[0, idx, sidx:eidx].sum()
return mat_new
if __name__ == "__main__":
print("util_music")
| 10,763 | 32.742947 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/main_gan.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager_GAN as nii_nn_wrapper_GAN
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference and not args.epoch2pt:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler,
'pin_memory': True}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
inout_trans_fns = prj_conf.input_output_trans_fn \
if hasattr(prj_conf, 'input_output_trans_fn') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
else:
val_set = None
# initialize the model and loss function
model_G = prj_model.ModelGenerator(
trn_set.get_in_dim(), trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
model_D = prj_model.ModelDiscriminator(
trn_set.get_in_dim(), trn_set.get_out_dim(),
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = None
# initialize the optimizer
optimizer_G_wrap = nii_op_wrapper.OptimizerWrapper(model_G, args)
optimizer_D_wrap = nii_op_wrapper.OptimizerWrapper(model_D, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint_G = None
checkpoint_D = None
else:
tmp_str = args.trained_model.split(",")
checkpoint_G = torch.load(tmp_str[0])
if len(tmp_str) > 1:
checkpoint_D = torch.load(tmp_str[1])
else:
checkpoint_D = None
# start training
nii_nn_wrapper_GAN.f_train_wrapper_GAN(
args, model_G, model_D,
loss_wrapper, device,
optimizer_G_wrap, optimizer_D_wrap,
trn_set, val_set,
checkpoint_G, checkpoint_D)
        # done for training
elif args.inference:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
        in_trans_fns = prj_conf.test_input_trans_fns \
            if hasattr(prj_conf, 'test_input_trans_fns') else None
        out_trans_fns = prj_conf.test_output_trans_fns \
            if hasattr(prj_conf, 'test_output_trans_fns') else None
        inout_trans_fns = prj_conf.test_input_output_trans_fn \
            if hasattr(prj_conf, 'test_input_output_trans_fn') \
            else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq = None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
# initialize model
model = prj_model.ModelGenerator(
test_set.get_in_dim(), test_set.get_out_dim(), args, prj_conf)
if args.trained_model == "":
print("Please provide ---trained-model")
sys.exit(1)
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(
args, model, device, test_set, checkpoint)
elif args.epoch2pt:
# for model conversion from epoch.pt to trained_network.pt
# initialize model
model = prj_model.ModelGenerator(
sum(prj_conf.input_dims), sum(prj_conf.output_dims), args, prj_conf)
if args.trained_model == "":
print("Please provide ---trained-model")
sys.exit(1)
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_convert_epoch_to_trained(
args, model, device, checkpoint)
else:
print("Fatal error in main.py")
sys.exit(1)
# done
return
if __name__ == "__main__":
main()
| 9,039 | 35.16 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/temp.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import sandbox.block_nn as nii_nn
import sandbox.block_nsf as nii_nsf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#####
## Model definition
#####
class Conv1dNoPermute(torch_nn.Conv1d):
"""
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, tanh=True):
super(Conv1dNoPermute, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = dilation_s * (kernel_s - 1) if causal \
else dilation_s * (kernel_s - 1) // 2,
dilation = dilation_s, groups=groups, bias=bias)
self.l_ac = torch_nn.Tanh() if tanh else torch_nn.Identity()
return
def forward(self, input_data):
data = input_data[0]
cond = input_data[1]
out = self.l_ac(
super(Conv1dNoPermute, self).forward(data)[:, :, :data.shape[-1]])
return [data + cond + out, cond]
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
NeuralFilterBlock(signal_size, hidden_size, kernel_size, conv_num=10)
args
----
signal_size: int, input signal is in shape (batch, length, signal_size)
hidden_size: int, output of conv layers is (batch, length, hidden_size)
kernel_size: int, kernel size of the conv layers
conv_num: number of conv layers in this neural filter block (default 10)
    legacy_scale: Bool, whether to load scale as a parameter or as a constant
                  To be compatible with old models that define self.scale
No impact on the result, just different ways to load a
fixed self.scale
"""
def __init__(self, signal_size, hidden_size, kernel_size=3, conv_num=10,
legacy_scale = False):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_size = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_p = torch_nn.Sequential(
torch_nn.Linear(signal_size, hidden_size, bias=False),
torch_nn.Tanh())
# dilated conv layers
tmp = [Conv1dNoPermute(hidden_size, hidden_size, x,
kernel_size, causal=True, bias=False) \
for x in self.dilation_size]
self.l_convs = torch_nn.Sequential(*tmp)
# ff layer to de-expand dimension
self.l_ff_f = torch_nn.Sequential(
torch_nn.Linear(hidden_size, hidden_size//4, bias=False),
torch_nn.Tanh(),
torch_nn.Linear(hidden_size//4, signal_size, bias=False),
torch_nn.Tanh())
# a simple scale: to be consistent with CURRENNT implementation
if legacy_scale:
# in case this scale is defined as model parameter in
# some old models
self.scale = torch_nn.Parameter(
torch.tensor([0.1]), requires_grad=False)
else:
# simple hyper-parameter should be OK
self.scale = 0.1
return
def forward(self, input_data):
"""
input
-----
signal (batchsize, length, signal_size)
context (batchsize, length, hidden_size)
context is produced from the condition module
output
------
output: (batchsize, length, signal_size)
"""
signal, context = input_data[0], input_data[1]
# expand dimension
tmp_hidden = self.l_ff_p(signal)
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
tmp_hidden = self.l_convs(
[tmp_hidden.permute(0, 2, 1),
context.permute(0, 2, 1)])[0].permute(0, 2, 1)
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
output_signal = self.l_ff_f(tmp_hidden) + signal
return [output_signal, context]
## For condition module only provide Spectral feature to Filter block
class CondModule(torch_nn.Module):
""" Conditiona module
Upsample and transform input features
CondModule(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0 = CondModule(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3):
super(CondModule, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
# bi-LSTM
self.l_blstm = nii_nn.BLSTMLayer(input_dim, self.blstm_s)
self.l_conv1d = nii_nn.Conv1dKeepLength(
self.blstm_s, output_dim, 1, self.cnn_kernel_s)
self.l_upsamp = nii_nn.UpSampleLayer(
self.output_dim, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = nii_nn.UpSampleLayer(1, self.up_sample, False)
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
spec = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
f0 = self.l_upsamp_F0(f0)
return spec, f0
# For source module
class SourceModuleMusicNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleMusicNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleMusicNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = nii_nsf.SineGen(
sampling_rate, harmonic_num, sine_amp,
add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleMusicNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
# sine fundamental component and harmonic overtones
sine_wavs, uv, _ = self.l_sin_gen(x)
# merge into a single excitation
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
# For Filter module
class FilterModuleMusicNSF(torch_nn.Module):
""" Filter for Hn-NSF
FilterModuleMusicNSF(signal_size, hidden_size, fir_coef,
block_num = 5,
kernel_size = 3, conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
fir_coef: list of FIR filter coeffs,
(low_pass_1, low_pass_2, high_pass_1, high_pass_2)
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
output = FilterModuleMusicNSF(harmonic_source,noise_source,uv,context)
harmonic_source (batchsize, length, dim=1)
noise_source (batchsize, length, dim=1)
context (batchsize, length, dim)
uv (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleMusicNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(
signal_size, hidden_size, kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.Sequential(*tmp)
def forward(self, har_component, noi_component, condition_feat, uv):
"""
"""
# harmonic component
#for l_har_block in self.l_har_blocks:
# har_component = l_har_block(har_component, condition_feat)
#output = har_component
output = self.l_har_blocks([har_component, condition_feat])[0]
return output
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
######
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
######
# configurations
self.sine_amp = 0.1
self.noise_std = 0.001
self.input_dim = in_dim
self.output_dim = out_dim
self.hidden_dim = 64
self.upsamp_rate = prj_conf.input_reso[0]
self.sampling_rate = prj_conf.wav_samp_rate
self.cnn_kernel_size = 3
self.filter_block_num = 5
self.cnn_num_in_block = 10
self.harmonic_num = 16
# the three modules
self.m_condition = CondModule(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s = self.cnn_kernel_size)
#self.m_source = SourceModuleMusicNSF(self.sampling_rate,
# self.harmonic_num,
# self.sine_amp,
# self.noise_std)
self.m_filter = FilterModuleMusicNSF(self.output_dim,
self.hidden_dim,\
self.filter_block_num, \
self.cnn_kernel_size, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
# normalize the data
feat = self.normalize_input(x)
# condition module
# place_holder is originally the up-sampled F0
# it is not used for noise-excitation model
        # but it has the same shape as the upsampled source signal
# it can help to create the noise_source below
cond_feat, place_holder = self.m_condition(feat, x[:, :, -1:])
with torch.no_grad():
noise_source = torch.randn_like(place_holder) * self.noise_std / 3
# source module
#har_source, noi_source, uv = self.m_source(f0_upsamped)
# filter module (including FIR filtering)
output = self.m_filter(noise_source, None, cond_feat, None)
# output
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# FFT length
self.fft_n = [4096, 4096, 4096]
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# loss
self.loss1 = torch_nn.MSELoss()
self.loss2 = torch_nn.MSELoss()
self.loss3 = torch_nn.MSELoss()
self.loss = [self.loss1, self.loss2, self.loss3]
#self.loss = torch_nn.MSELoss()
def compute(self, output_orig, target_orig):
""" Loss().compute(output, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# convert from (batchsize=1, length, dim=1) to (1, length)
if output_orig.ndim == 3:
output = output_orig.squeeze(-1)
else:
output = output_orig
if target_orig.ndim == 3:
target = target_orig.squeeze(-1)
else:
target = target_orig
# compute loss
loss = 0
for frame_shift, frame_len, fft_p, loss_f in \
zip(self.frame_hops, self.frame_lens, self.fft_n, self.loss):
x_stft = torch.stft(output, fft_p, frame_shift, frame_len, \
window=self.win(frame_len, \
device=output_orig.device),
onesided=True,
pad_mode="constant")
y_stft = torch.stft(target, fft_p, frame_shift, frame_len, \
window=self.win(frame_len,
device=output_orig.device),
onesided=True,
pad_mode="constant")
x_sp_amp = torch.log(torch.norm(x_stft, 2, -1).pow(2) + \
self.amp_floor)
y_sp_amp = torch.log(torch.norm(y_stft, 2, -1).pow(2) + \
self.amp_floor)
loss += loss_f(x_sp_amp, y_sp_amp)
return loss
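# Minimal sketch of how the multi-resolution spectral loss above is meant to
# be called. The waveform length is an illustrative assumption, and the
# sketch assumes a PyTorch version in which torch.stft returns the stacked
# real/imag view expected by Loss.compute.
def _demo_spectral_loss():
    loss_fn = Loss(None)        # the args argument is not used in __init__
    output = torch.randn(1, 16000)
    target = torch.randn(1, 16000)
    return loss_fn.compute(output, target)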
if __name__ == "__main__":
print("Definition of model")
| 17,431 | 36.010616 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/transfer_weight.py | #!/usr/bin/env python
"""
"""
from __future__ import absolute_import
import os
import sys
import copy
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_AL as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper_base
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
def self_defined_transfer(model_src, model_tar):
""" A self defined function to transfer the weights from model_src
to model_tar
"""
# load SSL front-end
model_tar.m_front_end.ssl_model.load_state_dict(
model_src.m_ssl.state_dict())
# load SSL front-end linear layer
model_tar.m_front_end.m_front_end_process.load_state_dict(
model_src.m_frontend[0].state_dict())
# load the linear output layer
model_tar.m_back_end.m_utt_level.load_state_dict(
model_src.m_output_act[0].state_dict())
return
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load 1st module: %s" % (args.module_model))
nii_warn.f_print("Load 2nd module: %s" % (args.module_model_aux))
prj_conf = importlib.import_module(args.module_config)
prj_model_src = importlib.import_module(args.module_model)
prj_model_tar = importlib.import_module(args.module_model_aux)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
#
checkpoint = torch.load(args.trained_model)
model_src = prj_model_src.Model(sum(prj_conf.input_dims),
sum(prj_conf.output_dims),
args, prj_conf)
model_tar = prj_model_tar.Model(sum(prj_conf.input_dims),
sum(prj_conf.output_dims),
args, prj_conf)
model_src.load_state_dict(checkpoint)
self_defined_transfer(model_src, model_tar)
torch.save(model_tar.state_dict(), 'temp.pt')
return
if __name__ == "__main__":
main()
| 2,922 | 31.120879 | 71 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/main_merge_datasets.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference and not args.epoch2pt:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
inout_trans_fns = prj_conf.input_output_trans_fn \
if hasattr(prj_conf, 'input_output_trans_fn') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
elif args.inference:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
inout_trans_fns = prj_conf.test_input_output_trans_fn \
if hasattr(prj_conf, 'test_input_output_trans_fn') \
else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
elif args.epoch2pt:
# for model conversion from epoch.pt to trained_network.pt
# initialize model
model = prj_model.Model(
sum(prj_conf.input_dims), sum(prj_conf.output_dims), args, prj_conf)
if args.trained_model == "":
print("Please provide ---trained-model")
sys.exit(1)
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_convert_epoch_to_trained(
args, model, device, checkpoint)
else:
print("Fatal error in main.py")
sys.exit(1)
# done
return
if __name__ == "__main__":
main()
| 9,342 | 35.928854 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/main_al.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper for active learning.
It is based on main_merge_datasets.py.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import copy
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_AL as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper_base
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# Load data pool and create data loader
pool_lst = prj_conf.al_pool_list
pool_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.al_pool_set_name, \
pool_lst,
prj_conf.al_pool_in_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.al_pool_out_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# pre-training using standard procedure
# change args
args_tmp = copy.deepcopy(args)
args_tmp.epochs = args.active_learning_pre_train_epoch_num
args_tmp.not_save_each_epoch = True
args_tmp.save_trained_name += '_pretrained'
args_tmp.active_learning_cycle_num = 0
pretraind_name = args_tmp.save_trained_name + args_tmp.save_model_ext
if args.active_learning_pre_train_epoch_num:
nii_warn.f_print_w_date("Normal training (warm-up) phase",level='h')
nii_warn.f_print("Normal training for {:d} epochs".format(
args.active_learning_pre_train_epoch_num))
op_wrapper_tmp = nii_op_wrapper.OptimizerWrapper(model, args_tmp)
loss_wrapper_tmp = prj_model.Loss(args_tmp)
nii_nn_wrapper_base.f_train_wrapper(
args_tmp, model, loss_wrapper, device, op_wrapper_tmp,
trn_set, val_set, checkpoint)
checkpoint = torch.load(pretraind_name)
elif checkpoint is None:
if os.path.isfile(pretraind_name):
checkpoint = torch.load(pretraind_name)
nii_warn.f_print("Use pretrained model before active learning")
else:
nii_warn.f_print("Use seed model to initialize")
nii_warn.f_print_w_date("Active learning phase",level='h')
# start training
nii_nn_wrapper.f_train_wrapper(
args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, pool_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper_base.f_inference_wrapper(
args, model, device, test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 10,498 | 36.766187 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/sandbox/different_main/main_profile.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_profile as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,827 | 35.924528 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/04-asvspoof2021-toy/lfcc-lcnn-lstm-sig_toy_example/main.py | #!/usr/bin/env python
"""
main.py
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
global_arg = args)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,501 | 34.530055 | 76 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/04-asvspoof2021-toy/lfcc-lcnn-lstm-sig_toy_example/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
try:
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
except OSError:
print("Skip loading protocol file")
return data_buffer
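# Illustrative usage (hypothetical path and trial name):
#   parser = protocol_parse('/path/to/protocol.txt')
#   parser['LA_T_1138215'] -> 1 for a bonafide trial and 0 for a spoof trial,
# since row[1] holds the trial name and row[-1] holds the bonafide/spoof tag.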
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
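        # For reference: with the 16 kHz working rate above, a 160-point hop
        # is 10 ms, a 320-point frame is 20 ms, and a 512-point FFT gives
        # 512 // 2 + 1 = 257 spectral bins.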
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
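                # e.g. the 20-dim base LFCC set above becomes 60-dim once
                # delta and delta-delta coefficients are appended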
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
        Output x (batchsize * number_of_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection, average over frames (pooling),
            #    and pass through the output layer
tmp_emb = m_output((hidden_features_lstm + hidden_features).mean(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
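        # Note: during training the sigmoid output above feeds BCELoss (see
        # the Loss wrapper below), while at inference the raw pre-sigmoid
        # value is returned and used directly as the detection score.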
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_eval(self, filenames):
""" retrieve the target label for a trial from protocol if available
"""
return [self.protocol_parser[x] if x in self.protocol_parser else -1 \
for x in filenames]
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target_eval(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
            # scores are printed to the log above; no score file is written here
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 14,927 | 33.878505 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train models on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,862 | 35.915493 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/config_train_asvspoof2019.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
 For inference, the inference-stage section below is only a placeholder;
 use a separate config_test_*.py for the actual inference configuration
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = ['asvspoof2019_trn']
val_set_name = ['asvspoof2019_val']
# for convenience
tmp = os.path.dirname(__file__) + '/../../../DATA/asvspoof2019_LA'
# File lists (text file, one data name per line, without name extension)
# trin_file_list: list of files for training set
trn_list = [tmp + '/scp/train.lst']
# val_file_list: list of files for validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# Just a buffer for convenience
# It can contain anything
# Here we use it to hold the path to the protocol.txt
# They will be loaded by model.py
optional_argument = [tmp + '/protocol.txt']
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage (place holder)
#########################################################
# Please use config_test_*.py for inference
# This part is just a placeholder
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 3,561 | 30.803571 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/config_train_toyset.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
 For inference, the inference-stage section below is only a placeholder;
 use a separate config_test_*.py for the actual inference configuration
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_trn']
# Name for the development set
val_set_name = ['asvspoof2019_toyset_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set
trn_list = [tmp + '/scp/train.lst']
# for development set
val_list = [tmp + '/scp/val.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
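# For reference: at the 16 kHz sampling rate above, truncate_seq = 64000
# samples corresponds to 4 seconds of audio per training segment.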
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# This used to load protocol(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
#import augment
#input_trans_fns = [[augment.wav_aug]]
#output_trans_fns = [[]]
#########################################################
## Configuration for inference stage (place holder)
#########################################################
# Please use config_test_*.py for inference
# This part is just a placeholder
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
| 4,915 | 31.556291 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-HuBERT-XL-fix-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# not an elegant approach, but the path to the pre-trained model is fixed here
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/hubert_xtralarge_ll60k.pt'
# This model produces output feature dimensions (per frame)
ssl_orig_output_dim = 1280
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
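# Illustrative usage (assumed shapes): given a waveform batch of shape
# (batch, length) at 16 kHz, e.g. (2, 64000), g_ssl_model.extract_feat()
# returns a tensor of roughly (2, length // 320, 1280), since this HuBERT
# checkpoint produces one 1280-dim feature vector per ~20 ms frame.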
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
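# Illustrative behaviour: with the empty g_attack_map used in this project,
# every trial listed in the protocol maps to True (in-distribution); a
# non-empty map such as {'A13': False} (hypothetical) would mark matching
# trials as out-of-distribution, and unlisted trials fall back to the
# CustomDict missing_value (True).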
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
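        # note: with m_lambda = 0. the two energy-margin terms in _loss()
        # vanish, so training reduces to plain cross-entropy on the
        # in-distribution data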
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
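        # (the frozen SSL feature, 1280-dim per frame for this checkpoint, is
        #  projected down to the trainable v_feat_dim = 128 by m_frontend)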
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
        Output x (batchsize * number_of_submodels, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames (pooling)
            #    to obtain the utterance-level embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
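        # Illustrative: for two-class logits the score is the difference
        # logit_bonafide - logit_spoof, e.g. logits [-1.2, 0.8] give a score
        # of 2.0; higher scores indicate "more bonafide".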
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
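        # Illustrative: with m_temp = 1 and two-class logits [2.0, 0.0], the
        # energy is -log(exp(2.0) + exp(0.0)) ~= -2.13; lower (more negative)
        # energy indicates an in-distribution (higher-confidence) trial.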
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # at inference time, utterances that are too short are given
        # placeholder scores (0.0) and skipped
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
            # scores are printed to the log above; no score file is written here
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,256 | 33.759928 | 91 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-Small-fix-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# not an elegant approach, but the path to the pre-trained model is fixed here
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/wav2vec_small.pt'
# This model produces 768 output feature dimensions (per frame)
ssl_orig_output_dim = 768
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass the frame-level features through the BLSTM layers
            #    (m_be_pool)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual connection + temporal mean pooling -> utterance-level
            #    embedding (the linear output layer is applied later in
            #    _compute_logit)
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
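        # e.g. (illustrative only): logits = [[-0.3, 1.2]] gives score 1.5;
        # a larger score means the trial is more likely bona fide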
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
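        # e.g. (illustrative only): with T = 1 and logits [2.0, 0.0],
        # energy = -logsumexp([2.0, 0.0]) ~= -2.13; lower (more negative)
        # energy corresponds to a more confident, in-distribution input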
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
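            # targets are tiled because _compute_logit() stacks logits from
            # all sub-models along the batch axis as [batch * submodel, class]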
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
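            # print one line per trial:
            # "Output, <trial name>, <label>, <CM score>, <negative energy>"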
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,249 | 33.747292 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-fix-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# Not an elegant approach, but the path to the pre-trained model is fixed here
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
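# Example usage (illustrative only; exact shapes depend on the checkpoint):
#   wav = torch.zeros(2, 64000)           # two 4-second waveforms at 16 kHz
#   feat = g_ssl_model.extract_feat(wav)  # -> (2, frame_num, 1024)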
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
     protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
target_row: int, default -1, use line[-1] as the target label
output:
-------
     data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
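        # with the empty g_attack_map defined above, every trial is treated
        # as in-distribution, so the energy-based OOD terms in _loss() never
        # receive out-of-distribution samples in this project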
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
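            # NOTE: MaxFeatureMap2D halves the channel dimension (so the
            # Conv2d(1, 64) output becomes 32 channels after MFM), and the
            # four 2x2 max-pooling layers reduce the feature dimension by a
            # factor of 16, which is why the flattened frame feature below
            # has size (v_feat_dim // 16) * 32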
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass the frame-level features through the BLSTM layers
            #    (m_be_pool)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual connection + temporal mean pooling -> utterance-level
            #    embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,249 | 33.747292 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-fix-LGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# Not an elegant approach, but the path to the pre-trained model is fixed here
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
     protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
target_row: int, default -1, use line[-1] as the target label
output:
-------
     data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
        # Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
# self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
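        # the 1024-dim SSL feature will be projected down to this size by the
        # linear front-end layer created below (self.m_frontend)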
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
# just a place holder
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer(v_feat_dim, v_feat_dim),
nii_nn.BLSTMLayer(v_feat_dim, v_feat_dim)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = v_feat_dim
else:
assert self.v_emd_dim == v_feat_dim, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear(v_feat_dim, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# The loss includes energy function for in and out dist. data
# But in this project, we will only use the common cross entropy
# This is done by setting self.m_temp = 1 and self.m_lambda = 0.0
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _pretransform(self, x_sp_amp, m_trans):
""" A wrapper on the self.m_transform part
"""
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
#hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
#hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
#frame_num = hidden_features.shape[1]
#hidden_features = hidden_features.view(batch_size, frame_num, -1)
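        # in this LGF variant m_trans is an Identity module, so the
        # (batch, frame_num, feat_dim) SSL features pass through unchanged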
hidden_features = m_trans(x_sp_amp)
return hidden_features
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# 1. 2. 3. steps in transform
hidden_features = self._pretransform(x_sp_amp, m_trans)
            # 4. pass the frame-level features through the BLSTM layers
            #    (m_be_pool)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual connection + temporal mean pooling -> utterance-level
            #    embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
This is just a place holder.
Because Model.forward() will compute the loss value.
The output of Model.forward() will be used as the outputs for Loss.compute
Thus, just return outputs from compute()
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,117 | 33.120527 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel(torch_nn.Module):
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
# dimension of output from SSL model. This is fixed
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
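        # NOTE: unlike the fixed-SSL variants, this model fine-tunes the SSL
        # front end, so eval() / no_grad() are intentionally disabled; the
        # "if True:" below only preserves the indentation of the original
        # "with torch.no_grad():" block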
#self.model.eval()
#with torch.no_grad():
if True:
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
     protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
target_row: int, default -1, use line[-1] as the target label
output:
-------
     data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
        # Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# dimension of compressed SSL features
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
# number of output class
self.v_out_class = 2
####
# create network
####
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
ssl_orig_output_dim = 1024
self.m_ssl = SSLModel(ssl_path, ssl_orig_output_dim)
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
# just a place holder, adopted from LLGF
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
if self.v_emd_dim is None:
self.v_emd_dim = v_feat_dim
else:
assert self.v_emd_dim == v_feat_dim, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear(v_feat_dim, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(self.m_ssl.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# The loss includes energy function for in and out dist. data
# But in this project, we will only use the common cross entropy
# This is done by setting self.m_temp = 1 and self.m_lambda = 0.0
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
#with torch.no_grad():
x_ssl_feat = self.m_ssl.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _pretransform(self, x_sp_amp, m_trans):
""" A wrapper on the self.m_transform part
"""
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
#hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
#hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
#frame_num = hidden_features.shape[1]
#hidden_features = hidden_features.view(batch_size, frame_num, -1)
hidden_features = m_trans(x_sp_amp)
return hidden_features
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# 1. 2. 3. steps in transform
hidden_features = self._pretransform(x_sp_amp, m_trans)
            # 4. pass through the pre-pooling module (Identity in this variant)
hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum + temporal mean pooling -> utterance-level
            #    embedding
tmp_emb = (hidden_features_lstm + hidden_features).mean(1) / 2
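            # since m_trans and m_be_pool are both Identity in this GF
            # variant, this reduces to a temporal mean of the projected SSL
            # features; the division by 2 compensates for the doubled
            # residual sum above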
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
This is just a place holder.
Because Model.forward() will compute the loss value.
The output of Model.forward() will be used as the outputs for Loss.compute
Thus, just return outputs from compute()
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,134 | 33.281664 | 88 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-ft-LGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel(torch_nn.Module):
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
# dimension of output from SSL model. This is fixed
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
#self.model.eval()
#with torch.no_grad():
if True:
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
     protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
target_row: int, default -1, use line[-1] as the target label
output:
-------
     data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# load protocol for CM (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Load protocol for OOD (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# path to the pre-trained SSL model
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
ssl_orig_output_dim = 1024
self.m_ssl = SSLModel(ssl_path, ssl_orig_output_dim)
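        # since SSLModel is a torch_nn.Module held as an attribute (rather
        # than the global object used in the fixed-SSL variants), its
        # parameters are registered with this model and fine-tuned jointly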
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
# just a place holder
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer(v_feat_dim, v_feat_dim),
nii_nn.BLSTMLayer(v_feat_dim, v_feat_dim)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = v_feat_dim
else:
assert self.v_emd_dim == v_feat_dim, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear(v_feat_dim, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(self.m_ssl.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
#with torch.no_grad():
x_ssl_feat = self.m_ssl.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _pretransform(self, x_sp_amp, m_trans):
""" A wrapper on the self.m_transform part
"""
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
#hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
#hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
#frame_num = hidden_features.shape[1]
#hidden_features = hidden_features.view(batch_size, frame_num, -1)
hidden_features = m_trans(x_sp_amp)
return hidden_features
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# 1. 2. 3. steps in transform
hidden_features = self._pretransform(x_sp_amp, m_trans)
            # 4. pooling: pass the hidden features through m_be_pool
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum, then average over frames to get the
            #    utterance embedding (the output layer is applied later
            #    in _compute_logit)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
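# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): a stand-alone
# demonstration of the energy score used by Model._energy above,
#   E(x) = -T * logsumexp(logits / T),
# computed on made-up 2-class logits. Lower energy corresponds to a more
# confident (typically in-distribution) trial. All values are hypothetical.
def _demo_energy_score():
    import torch
    temp = 1.0
    # two hypothetical trials: one confident, one uncertain
    logits = torch.tensor([[6.0, -4.0],
                           [0.2, 0.1]])
    energy = -temp * torch.logsumexp(logits / temp, dim=1)
    # the confident trial receives a much lower (more negative) energy
    return energy
# ---------------------------------------------------------------------------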
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 17,771 | 32.851429 | 88 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-ft-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel(torch_nn.Module):
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
# dimension of output from SSL model. This is fixed
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
#self.model.eval()
#with torch.no_grad():
if True:
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
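# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): typical use of the
# SSLModel wrapper above. The checkpoint path and waveform sizes are
# hypothetical; actually running this requires the real fairseq checkpoint.
def _demo_ssl_feature_extraction():
    import torch
    cp_path = '/path/to/xlsr_53_56k.pt'    # hypothetical path to the checkpoint
    ssl = SSLModel(cp_path, ssl_orig_output_dim=1024)
    wav = torch.randn(2, 64000)            # (batch, samples), ~4 s at 16 kHz
    feat = ssl.extract_feat(wav)           # (batch, frame_num, 1024)
    return feat.shape
# ---------------------------------------------------------------------------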
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
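# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): what
# protocol_parse_general returns for two hypothetical protocol lines.
# With the empty g_attack_map used in this project, every parsed trial is
# simply marked True; the trial names below are made up for illustration.
def _demo_protocol_parse(tmp_path='/tmp/demo_protocol.txt'):
    lines = ["LA_0031 LA_E_0000001 - A13 spoof",
             "LA_0030 LA_E_0000002 - - bonafide"]
    with open(tmp_path, 'w') as file_ptr:
        file_ptr.write('\n'.join(lines) + '\n')
    data_buffer = protocol_parse_general(tmp_path, g_attack_map, ' ', -2)
    return data_buffer['LA_E_0000001'], data_buffer['LA_E_0000002']
# ---------------------------------------------------------------------------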
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# load protocol for CM (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# load protocol for OOD (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# dimension of compressed SSL features
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
        # path to the pre-trained SSL model
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
ssl_orig_output_dim = 1024
self.ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(self.ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
#with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pooling: pass the hidden features through m_be_pool (two BLSTM layers)
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum, then average over frames to get the
            #    utterance embedding (the output layer is applied later
            #    in _compute_logit)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
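# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): how tensor shapes
# evolve through Model._compute_embedding above for this configuration
# (v_feat_dim = 128). The batch size and frame count are hypothetical.
def _demo_embedding_shapes(batch=2, frame_num=200, feat_dim=128):
    # 0. SSL feature projected by m_frontend:   (batch, frame_num, 128)
    # 1. unsqueeze for the 2D LCNN:             (batch, 1, frame_num, 128)
    # 2. LCNN, four 2x2 max-pools, 32 maps:     (batch, 32, ~frame_num//16, 128//16)
    # 3. permute + view for the BLSTM:          (batch, ~frame_num//16, 32 * 8)
    # 4. BLSTM layers keep the last dimension:  (batch, ~frame_num//16, 256)
    # 5. residual sum + mean over frames:       (batch, 256) == (feat_dim // 16) * 32
    emb_dim = (feat_dim // 16) * 32
    return (batch, frame_num // 16, emb_dim), (batch, emb_dim)
# ---------------------------------------------------------------------------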
if __name__ == "__main__":
print("Definition of model")
| 19,336 | 33.96745 | 88 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-Large2-fix-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# not an ideal way to hard-code the path, but it is fixed for this project
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/w2v_large_lv_fsh_swbd_cv.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
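# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): because the SSL
# model above is a plain Python object (not a registered torch sub-module)
# and extract_feat runs under torch.no_grad(), its weights stay frozen and
# the returned features carry no gradient. The waveform size is hypothetical;
# actually running this requires the real checkpoint.
def _demo_frozen_ssl_features():
    import torch
    wav = torch.randn(2, 32000)              # (batch, samples), ~2 s at 16 kHz
    feat = g_ssl_model.extract_feat(wav)     # (batch, frame_num, 1024)
    return feat.requires_grad                # expected: False
# ---------------------------------------------------------------------------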
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
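# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): how the flags
# produced by protocol_parse_general are later turned into in/out-of-
# distribution index arrays (see Model._get_in_out_indx below). The file
# names and flags here are made up for illustration.
def _demo_in_out_split():
    import numpy as np
    in_out_flags = {'trial_a': True, 'trial_b': False, 'trial_c': True}
    filenames = ['trial_a', 'trial_b', 'trial_c']
    in_indx = [i for i, name in enumerate(filenames) if in_out_flags[name]]
    out_indx = [i for i, name in enumerate(filenames) if not in_out_flags[name]]
    return np.array(in_indx), np.array(out_indx)   # ([0, 2], [1])
# ---------------------------------------------------------------------------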
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pooling: pass the hidden features through m_be_pool (two BLSTM layers)
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum, then average over frames to get the
            #    utterance embedding (the output layer is applied later
            #    in _compute_logit)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
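# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): the training loss
# computed inside Model._loss above, written out for made-up logits, labels
# and in/out split. In this project m_lambda is 0.0, so only the
# cross-entropy term is active; the energy-margin terms are shown for
# completeness with hypothetical margin and weight values.
def _demo_energy_margin_loss():
    import torch
    import torch.nn.functional as F
    logits = torch.tensor([[2.0, -1.0], [-0.5, 1.5]])     # hypothetical 2-class logits
    targets = torch.tensor([0, 1])                        # hypothetical labels
    temp, lam, e_m_in, e_m_out = 1.0, 0.1, -25.0, -7.0
    energy = -temp * torch.logsumexp(logits / temp, dim=1)
    in_idx, out_idx = torch.tensor([0]), torch.tensor([1])  # hypothetical split
    loss = F.cross_entropy(logits[in_idx], targets[in_idx])
    loss = loss + lam * torch.pow(F.relu(energy[in_idx] - e_m_in), 2).mean()
    loss = loss + lam * torch.pow(F.relu(e_m_out - energy[out_idx]), 2).mean()
    return loss
# ---------------------------------------------------------------------------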
if __name__ == "__main__":
print("Definition of model")
| 19,262 | 33.770758 | 93 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-XLSR-fix-GF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# not an ideal way to hard-code the path, but it is fixed for this project
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/xlsr_53_56k.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
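# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): how the
# Model._compute_score method further below turns 2-class logits into a
# single detection score per trial (bonafide logit minus spoof logit).
# The logit values are made up for illustration.
def _demo_detection_score():
    import torch
    logits = torch.tensor([[-3.0, 4.0],    # hypothetical bonafide-looking trial
                           [5.0, -2.0]])   # hypothetical spoof-looking trial
    scores = logits[:, 1] - logits[:, 0]   # positive -> bonafide, negative -> spoof
    return scores                          # tensor([ 7., -7.])
# ---------------------------------------------------------------------------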
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
        # Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
# Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
# just a place holder
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
torch_nn.Identity()
)
)
if self.v_emd_dim is None:
self.v_emd_dim = v_feat_dim
else:
assert self.v_emd_dim == v_feat_dim, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear(v_feat_dim, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
# The loss includes energy function for in and out dist. data
# But in this project, we will only use the common cross entropy
# This is done by setting self.m_temp = 1 and self.m_lambda = 0.0
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _pretransform(self, x_sp_amp, m_trans):
""" A wrapper on the self.m_transform part
"""
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
#hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
#hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
#frame_num = hidden_features.shape[1]
#hidden_features = hidden_features.view(batch_size, frame_num, -1)
hidden_features = m_trans(x_sp_amp)
return hidden_features
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# 1. 2. 3. steps in transform
hidden_features = self._pretransform(x_sp_amp, m_trans)
            # 4. pooling: pass the hidden features through m_be_pool (Identity here)
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual sum averaged over frames and halved (the two branches
            #    are identical here); the output layer is applied later
            #    in _compute_logit
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1) / 2
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
# feature_vec is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
This is just a place holder.
Because Model.forward() will compute the loss value.
The output of Model.forward() will be used as the outputs for Loss.compute
Thus, just return outputs from compute()
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
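# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): the pooling used by
# this GF variant. Because m_transform and m_before_pooling are Identity in
# this model, the embedding reduces to the average of the projected SSL
# features over frames (the two identical branches are summed and halved).
# Tensor sizes below are hypothetical.
def _demo_gf_pooling():
    import torch
    feat = torch.randn(2, 200, 128)           # (batch, frame_num, v_feat_dim)
    hidden = feat                              # m_trans is Identity
    hidden_lstm = hidden                       # m_be_pool is Identity
    emb = (hidden_lstm + hidden).mean(1) / 2   # equivalent to feat.mean(1)
    return torch.allclose(emb, feat.mean(1))   # expected: True
# ---------------------------------------------------------------------------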
if __name__ == "__main__":
print("Definition of model")
| 18,028 | 33.210626 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-LFCC-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Adopted from OOD project.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torchaudio
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#################
## Misc functions
#################
# A function to load in/out label for OOD detection. This is just a place holder
# in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
protocol_filepath: string, path to the protocol file
target_row: int, default -1, use line[-1] as the target label
output:
-------
      data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
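# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original project): the feature and
# classifier dimensions implied by the LFCC configuration used in the Model
# below (frame length 320, frame shift 160, FFT 512, 20 base LFCC plus delta
# and delta-delta). The trial duration is hypothetical and the frame count
# is only a rough estimate.
def _demo_lfcc_dimensions():
    sr, num_samples = 16000, 64000                  # hypothetical 4-second trial
    frame_len, frame_hop, lfcc_base = 320, 160, 20
    frame_num = 1 + (num_samples - frame_len) // frame_hop   # rough frame count
    feat_dim = lfcc_base * 3                        # static + delta + delta-delta = 60
    # the LCNN downsamples both axes by 16 (four 2x2 max-pools), so the
    # BLSTM width and the embedding dimension are (feat_dim // 16) * 32
    emb_dim = (feat_dim // 16) * 32                 # 96
    return frame_num, feat_dim, emb_dim
# ---------------------------------------------------------------------------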
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# no truncation
self.v_truncate_lens = [None for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32),
nii_nn.BLSTMLayer((lfcc_dim//16) * 32, (lfcc_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (lfcc_dim // 16) * 32
else:
assert self.v_emd_dim == (lfcc_dim//16) * 32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((lfcc_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
          x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_transform,
self.m_before_pooling, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass the frame-level features through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames (pooling)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
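    # Editor note (illustrative, not part of the original code): _loss() above
    # combines cross-entropy on in-distribution trials with an energy-margin
    # regularizer weighted by self.m_lambda (0.0 here, i.e. disabled):
    #   loss = CE(logits_in, targets_in)
    #        + lambda * mean(relu(E_in - m_e_m_in)^2)
    #        + lambda * mean(relu(m_e_m_out - E_out)^2)
    # which, when enabled, pushes in-dist. energies below m_e_m_in (-25.0)
    # and out-of-dist. energies above m_e_m_out (-7.0).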
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 18,598 | 34.29222 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-Large1-fix-LLGF/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# not a good way to hard-code the path, but it is fixed for this project
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/libri960_big.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
#################
## Misc functions
#################
# A function to load the in/out-of-distribution labels for OOD detection.
# This is just a placeholder in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
      protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
target_row: int, default -1, use line[-1] as the target label
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
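# Editor note (illustrative): with the empty g_attack_map used in this file,
# protocol_parse_general() marks every listed trial as in-distribution, e.g.
# the line "LA_0030 LA_E_5849185 - - bonafide" yields
# data_buffer["LA_E_5849185"] == True. With a non-empty g_map, the value is
# instead looked up from the column selected by target_row.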
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pass the frame-level features through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. add the residual connection and average over frames (pooling)
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,250 | 33.749097 | 86 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper for active learning.
It is based on main_mergedataset.py.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import copy
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_AL as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper_base
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# Load data pool and create data loader
pool_lst = prj_conf.al_pool_list
pool_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.al_pool_set_name, \
pool_lst,
prj_conf.al_pool_in_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.al_pool_out_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# pre-training using standard procedure
# change args
args_tmp = copy.deepcopy(args)
args_tmp.epochs = args.active_learning_pre_train_epoch_num
args_tmp.not_save_each_epoch = True
args_tmp.save_trained_name += '_pretrained'
args_tmp.active_learning_cycle_num = 0
pretraind_name = args_tmp.save_trained_name + args_tmp.save_model_ext
if args.active_learning_pre_train_epoch_num:
nii_warn.f_print_w_date("Normal training (warm-up) phase",level='h')
nii_warn.f_print("Normal training for {:d} epochs".format(
args.active_learning_pre_train_epoch_num))
op_wrapper_tmp = nii_op_wrapper.OptimizerWrapper(model, args_tmp)
loss_wrapper_tmp = prj_model.Loss(args_tmp)
nii_nn_wrapper_base.f_train_wrapper(
                args_tmp, model, loss_wrapper_tmp, device, op_wrapper_tmp,
trn_set, val_set, checkpoint)
checkpoint = torch.load(pretraind_name)
elif checkpoint is None:
if os.path.isfile(pretraind_name):
checkpoint = torch.load(pretraind_name)
nii_warn.f_print("Use pretrained model before active learning")
else:
nii_warn.f_print("Use seed model to initialize")
nii_warn.f_print_w_date("Active learning phase",level='h')
# start training
nii_nn_wrapper.f_train_wrapper(
args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, pool_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper_base.f_inference_wrapper(
args, model, device, test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 10,498 | 36.766187 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/config_AL_train_toyset.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_trn']
# Name for the development set
val_set_name = ['asvspoof2019_toyset_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set
trn_list = [tmp + '/scp/train.lst']
# for development set
val_list = [tmp + '/scp/val.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# For active learning pool data
# ===
# Similar configurations as above
#
# This is for demonstration, we still use the toy set as pool set.
# And we will merge the trainin and development sets as the pool set
#
# Name of the pool subsets
al_pool_set_name = ['pool_toyset_trn', 'pool_toyset_val']
# list of files for each pool subsets
al_pool_list = [tmp + '/scp/train.lst', tmp + '/scp/val.lst']
# list of input data directories
al_pool_in_dirs = [[tmp + '/train_dev'],
[tmp + '/train_dev']]
al_pool_out_dirs = [[] for x in al_pool_in_dirs]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# This is used to load the protocol file(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
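# Editor note (illustrative): the entries above are paired by index, i.e.
# subset n is described by trn_set_name[n], trn_list[n], and input_dirs[n].
# With the single training subset configured here, the scripts read one
# waveform <tmp>/train_dev/<utterance>.wav for every utterance listed in
# <tmp>/scp/train.lst, and labels are taken from <tmp>/protocol.txt.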
| 5,691 | 30.274725 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Adv/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data_knowing_train to select data for training
The above function is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
# put the model to GPU if it not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
# linear linear to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
        # input feat has shape (batch, frame_num, feat_dim);
        # average pooling over frames -> (batch, feat_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the scores,
        utterance embeddings, and energy-based confidence.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
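# Editor note (illustrative): in BackEnd.inference() the detection score is
# the logit difference logits[:, 1] - logits[:, 0] (class 1 denotes bona fide
# in this project), and the negative energy from nii_loss_util.neg_energy()
# is returned as an additional confidence measure; both are printed per trial
# by Model.__inference().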
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
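    # Editor note (illustrative): for multi-view training data of shape
    # (batch, length, 1 + num_spoofed, num_aug), _get_target_vec() builds the
    # flattened label pattern [1]*num_aug + [0]*(num_sys-1)*num_aug, tiled
    # once per batch item, which matches the (batch * num_sys * num_aug)
    # ordering produced in __forward_multi_view().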
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data_knowing_train(self,
train_data_loader,
pool_data_loader,
num_sample):
"""idx = al_retrieve_data_knowing_train(
train_data_loader,
pool_data_loader,
num_sample)
        Data retrieval function for active learning
Args:
-----
train_data_loader: Pytorch DataLoader, for train data
pool_data_loader: Pytorch DataLoader, for pool data
num_sample: int, number of samples selected
Return
------
idx: list of index
"""
def _adv_attack(data, data_grad, epsilon=0.3):
            return data + data_grad * epsilon
def _feat_dis(feat1, feat2):
# feat1 (batch, feat)
# feat2 (batch, feat)
edis = torch.cdist(feat1.unsqueeze(0), feat2.unsqueeze(0))[0]
return torch.min(edis, dim=0)[0]
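        # Editor note (illustrative summary of the selection strategy below):
        # 1. take one training mini-batch and compute the gradient of the
        #    CE loss w.r.t. the input waveforms;
        # 2. build a gradient-perturbed batch with _adv_attack()
        #    (data + epsilon * grad);
        # 3. embed both the perturbed batch and every pool utterance, and
        #    score each pool utterance by its minimum embedding distance to
        #    the perturbed batch (_feat_dis);
        # 4. return the indices of the num_sample pool utterances with the
        #    smallest distances.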
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([pool_data_loader.dataset.__len__()])
conf_list = np.zeros([pool_data_loader.dataset.__len__()])
#
counter = 0
# get gradients
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(train_data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device, dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# To collect gradient
x.requires_grad = True
# Forward pass (copied from forward())
            # We cannot directly call forward() because it branches on
            # self.training and assumes the (balanced) training mini-batch
            # format
feat_vec = self.m_front_end(x)
logits, _ = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
loss = self.m_ce_loss(logits, target_)
# Backward pass
self.zero_grad()
loss.backward()
# get gradient
data_grad = x.grad.data
break
# create adversarial example
perturbed_data = _adv_attack(x, data_grad)
# loop over the pool and find the nearest pool data
with torch.no_grad():
# feature vec for adversarial example
ad_feature_vec = self.m_front_end(perturbed_data)
_, ad_feature_vec = self.m_back_end(ad_feature_vec)
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(pool_data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device, dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
or_feature_vec = self.m_front_end(x)
_, or_feature_vec = self.m_back_end(or_feature_vec)
scores = _feat_dis(ad_feature_vec, or_feature_vec)
                # add the distance score and data index to the buffer
                conf_list[counter:counter+x.shape[0]] = np.array(
                    [s.item() for s in scores])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
# select the best
sorted_idx = np.argsort(conf_list)
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 20,901 | 32.125198 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Rem/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
 2. A function al_exclude_data to select data to be excluded from the pool,
    followed by a function al_retrieve_data to select data from the pool
    (for training)
Both functions are called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
# put the model to GPU if it not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
# linear linear to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
        # input feat has shape (batch, frame_num, feat_dim);
        # average pooling over frames -> (batch, feat_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the scores,
        utterance embeddings, and energy-based confidence.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
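        # Example: num_sys=3, num_aug=2, bs=2 gives the per-trial pattern
        # [1, 1, 0, 0, 0, 0], which is then tiled over the batch:
        # [1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0]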
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # utterance too short, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
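        # Shape example: with bat_siz=2, pad_len=64000, num_sys=3, num_aug=2,
        # x is (2, 64000, 3, 2) and x_new becomes (12, 64000); rows are grouped
        # per trial as [sys0_aug0, sys0_aug1, sys1_aug0, ...], where sys0 is
        # the bona fide utterance.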
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# randomly select data index
sorted_idx = np.arange(data_loader.dataset.__len__())
np.random.shuffle(sorted_idx)
return_idx = sorted_idx[0:num_sample]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
def al_exclude_data(self, data_loader, num_sample):
"""idx = al_exclude_data(data_loader, num_sample)
Function to select useless data from the pool and remove them
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select data with low energy (i.e., high confidence: the model has
        # already seen this kind of data, thus the data is useless)
sorted_idx = np.argsort(conf_list)
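        # Example: conf_list = [0.3, -1.2, 0.5] -> argsort returns [1, 0, 2],
        # so the sample with the lowest energy (most confident) is excluded
        # first.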
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
        # return the data index,
        # the corresponding samples will be removed from the pool set
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,868 | 31.518822 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-PosE/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data to score the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned data index will be used to retrieve the data from pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
# display utilities (needed for nii_display.f_die in al_retrieve_data)
import core_scripts.other_tools.display as nii_display
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
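        # For a typical wav2vec 2.0 / XLSR checkpoint this is assumed to yield
        # about one frame per 20 ms (stride of 320 samples at 16 kHz), i.e.,
        # frame_num is roughly length // 320.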
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
# average pooling -> (batch, self.out_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the score, the
        utterance embedding, and the energy-based confidence.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # utterance too short, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
# feedforward pass
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select the least useful data (those with low energy, i.e., high confidence)
sorted_idx = np.argsort(conf_list)
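        # np.argsort is ascending, so the lowest-energy (highest-confidence)
        # pool samples are placed first and end up being retrieved.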
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,133 | 31.651877 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-NegE/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data to score the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned data index will be used to retrieve the data from pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
# display utilities (needed for nii_display.f_die in al_retrieve_data)
import core_scripts.other_tools.display as nii_display
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
# average pooling -> (batch, self.out_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the score, the
        utterance embedding, and the energy-based confidence.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # utterance too short, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
# feedforward pass
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select the most useful data (those with high energy, i.e., low confidence)
sorted_idx = np.argsort(conf_list)[::-1]
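        # [::-1] turns the ascending argsort into descending order, so the
        # highest-energy (least confident) pool samples are selected first.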
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,138 | 31.66041 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Pas/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data to score the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned data index will be used to retrieve the data from pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
datalength: list of int, length of wav in the mini-batch
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device / dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
# average pooling -> (batch, self.out_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the score, the
        utterance embedding, and the energy-based confidence.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # utterance too short, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# randomly select data index
sorted_idx = np.arange(data_loader.dataset.__len__())
np.random.shuffle(sorted_idx)
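        # Selection is uniformly random over the pool; this variant presumably
        # serves as the passive-learning baseline.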
return_idx = sorted_idx[0:num_sample]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 17,572 | 31.009107 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
    data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
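# worked example with the default setting: frame_hop = 160 samples (10 ms at
# 16 kHz), so 10 * 16 * 750 // 160 = 750 frames, i.e. roughly 7.5 s of audio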
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which is input to oc-softmax layer
self.v_emd_dim = 256
# output class (1 for one-class softmax)
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# softmax
self.m_a_softmax = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
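# note on the channel sizes above: MaxFeatureMap2D splits its input channels
# into two halves and keeps the element-wise maximum, so every convolution
# producing 64 channels feeds a layer expecting 32 channels, 96 feeds 48,
# and 128 feeds 64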
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
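# shape check for the linear layer above (default front-end, trunc_len = 750,
# lfcc_dim = 20 * 3 = 60 after deltas): the four max-pooling layers shrink the
# feature map by 16 in each direction, giving (750 // 16) * (60 // 16) * 32
# = 46 * 3 * 32 = 4416 flattened features; MaxFeatureMap2D then halves
# 512 -> 256 before the final projection to the 256-dim embedding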
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, pad or trim each trial independently
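# (illustrative example: with trunc_len = 750, a trial with ~300 usable
#  frames is tiled ceil(750/300) = 3 times and cut back to 750 frames,
#  while a trial with ~1000 frames is cropped at a random start position)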
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
# get scores
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
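# A rough sketch of how this wrapper is presumably driven by the training
# loop in core_scripts.nn_manager (assumed calling convention, not code
# copied from that module):
#   outputs = model(x, fileinfo)   # -> [a_softmax_act, target_vec, True]
#   loss = loss_wrapper.compute(outputs, target)
# where outputs[0] / outputs[1] are the oc-softmax activations and targets
# prepared in Model.forward() above.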
if __name__ == "__main__":
print("Definition of model")
| 16,219 | 34.884956 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_file_list: list of files for training set
trn_list = tmp + '/scp/train.lst'
# val_file_list: list of files for validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
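# Assumed on-disk layout sketch (based on the paths above, not verified):
# ../DATA/asvspoof2019_LA/train_dev/<trial_name>.wav for the waveforms,
# with <trial_name> listed one per line (no extension) in scp/train.lst
# and scp/val.lst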
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features, which are []
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import sandbox.block_resnet as nii_resnet
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which will be input to the oc-softmax layer
self.v_emd_dim = 256
# output class (1 for one-class softmax)
self.v_out_class = 1
####
# create network
####
# backend
self.m_model = []
# front-end
self.m_frontend = []
# softmax layer for backend
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_model.append(
nii_resnet.ResNet(self.v_emd_dim)
)
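# (assumption: the ResNet backbone from sandbox.block_resnet maps the
#  (batch, 1, lfcc_dim, frame_num) LFCC map to a pair of
#  (embedding, auxiliary output); this matches how it is unpacked in
#  _compute_embedding below)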
self.m_frontend.append(
nii_front_end.LFCC(
self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx],
self.m_target_sr, self.lfcc_dim[idx], with_energy=True)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_model = torch_nn.ModuleList(self.m_model)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_feat_dim, frame_num)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# input to resnet should be (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp_buff
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_model) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_model)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, fft_bin, frame_length)
# 2. compute hidden features
features, final_output = m_model(x_sp_amp.unsqueeze(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = features
return output_emb
def _compute_score(self, feature_vec, angle=False):
"""
"""
# compute oc-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], angle)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)[0]
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,524 | 33.414758 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_file_list: list of files for training set
trn_list = tmp + '/scp/train.lst'
# val_file_list: list of files for validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features, which are []
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
torch.manual_seed(1)
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which will be input to the a-softmax layer
self.v_emd_dim = 2
# output class (2 for a-softmax layer)
self.v_out_class = 2
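# note: unlike the oc-softmax variants above (256-dim embedding, one output
# class), this a-softmax model uses a 2-dim embedding and two output classes
# (spoof vs. bonafide)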
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# softmax
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_a_softmax.append(
nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
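# use the softmax probability of the bonafide class (index 1) as the
# detection score at inference time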
return torch_nn_func.softmax(x_cos_val, dim=1)[:, 1]
else:
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_a_softmax.AngularSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 16,175 | 34.946667 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_file_list: list of files for training set
trn_list = tmp + '/scp/train.lst'
# val_file_list: list of files for validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features, which are []
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
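        # a hedged sanity check of the arithmetic above: with the default
        # frame_hop = 160 (10 ms at 16 kHz), 10 * 16 * 750 // 160 = 750,
        # i.e. roughly 7.5 seconds of audio are kept per trial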
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
            # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
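    # Illustrative note on _front_end (hypothetical numbers): with
    # frame_hop = 160 and trunc_len = 750, a ~4 s trial (~400 frames) is
    # tiled repeatedly up to 750 frames, while a ~10 s trial (~1000 frames)
    # is cropped to a random 750-frame segment.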
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 15,225 | 34.741784 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
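# (here the input feature is the raw waveform itself, so one input sample
#  corresponds to one waveform point, hence reso = 1)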
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncation, set truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features, which are []
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/model.py | #!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF with trainable sinc filter
version: 9
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over dilated BLSTM
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
Recurrency is conducted along "length"
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
#
# 1D dilated convolution that keeps the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
For batch 0:
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
For n in [1, sequence_length):
output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
        This method may not be efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
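    # Hedged numeric sketch of the roll-and-sum above (made-up values): with
    # signal [x1, x2, x3] and filter order K = 2, the padded signal is
    # [x1, x2, x3, 0]; rolling by k = 0 and k = 1 and weighting by
    # f_coef[:, :, 0] and f_coef[:, :, 1] yields
    #   y[n] = f_coef[n, 0] * x[n] + f_coef[n, 1] * x[n-1]
    # where x[-1] is the zero that wraps around from the padding.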
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
self.order = self.half_k * 2 +1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order]
x[:, :, self.half_order] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
            # temporary buffer of (-1)^n for gain normalization in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
        # normalize the coefs so that the gain at frequency 0 (low-pass) /
        # pi (high-pass) is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
    Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
#
# Sine waveform generator
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert F0 to per-sample phase increment. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase: sine[t] = sin(2 * pi * \sum_{i=1}^{t} rad_i)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
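    # Hedged illustration of the cumsum wrapping trick above (made-up values):
    # if rad_values over time are [0.3, 0.8, 0.5], the raw cumsum is
    # [0.3, 1.1, 1.6]; cumsum % 1 gives [0.3, 0.1, 0.6], and a -1 shift is
    # inserted wherever the wrapped value decreases, so the accumulated phase
    # stays within [0, 1) while sin(2 * pi * phase) is unchanged.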
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: for unvoiced frames, the noise amplitude should be
            #  similar to sine_amp (std = self.sine_amp/3 -> max ~ sine_amp);
            #  for voiced frames, the noise std is self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
#self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
tmp_input_size = [input_dim, output_dim, output_dim]
tmp_output_size = [output_dim, output_dim, output_dim]
tmp = [Conv1dKeepLength(x, y, dilation_s = 1,
kernel_s = self.cnn_kernel_s)
for x, y in zip(tmp_input_size, tmp_output_size)]
self.l_conv1ds = torch_nn.ModuleList(tmp)
#self.l_conv1ds = BLSTMLayer(input_dim, self.blstm_s)
# the CNN layer (+1 dim for cut_off_frequence of sinc filter)
#self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
# self.output_dim, \
# dilation_s = 1, \
# kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim, \
self.up_sample, True)
# separate layer for up-sampling normalized F0 values
self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
# hidden_feat is between (-1, 1) after conv1d with tanh
# (-0.2, 0.2) + 0.3 = (0.1, 0.5)
# voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9)
# unvoiced: (0.1, 0.5) = (0.1, 0.5)
return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
tmp = feature
for l_conv in self.l_conv1ds:
tmp = l_conv(tmp)
tmp = self.l_upsamp(tmp)
        # concatenate normalized F0 with hidden spectral features
context = torch.cat((tmp[:, :, 0:self.output_dim-1], \
self.l_upsamp_f0_hi(feature[:, :, -1:])), \
dim=2)
# hidden feature for cut-off frequency
hidden_cut_f = tmp[:, :, self.output_dim-1:]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return context, f0_upsamp, cut_f_smoothed, hidden_cut_f
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
# For Filter module
class FilterModuleHnSincNSF(torch_nn.Module):
""" Filter for Hn-sinc-NSF
FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
Usage:
output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context)
har_source: source for harmonic branch (batchsize, length, dim=1)
noi_source: source for noise branch (batchsize, length, dim=1)
cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
context: hidden features to be added (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnSincNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, cond_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtones in source
self.harmonic_num = 7
# order of sinc-windowed-FIR-filter
self.sinc_order = 31
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnSincNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
        # assume x[:, :, -1] is F0; take F0 before input normalization
f0 = x[:, :, -1:]
        # normalize the input feature data
feat = self.normalize_input(x)
# condition module
# feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc,
# hidden-feature-for-cut-off-f
cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv
har_source, noi_source, uv = self.m_source(f0_upsamped)
# neural filter module (including sinc-based FIR filtering)
# output
output = self.m_filter(har_source, noi_source, cond_feat, cut_f)
if self.training:
            # just in case we need to penalize the hidden feature for
# cut-off-freq.
return [output.squeeze(-1), hid_cut_f]
else:
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# fft length
self.fft_n = [512, 128, 2048]
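        # a hedged reading of the three resolutions above, assuming 16 kHz
        # waveforms: hops of 80/40/640 samples are 5/2.5/40 ms, frame lengths
        # of 320/80/1920 samples are 20/5/120 ms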
# window type in stft
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# loss function
self.loss = torch_nn.MSELoss()
# weight to penalize hidden features for cut-off-frequency
# for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0
self.cutoff_w = 0.0
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
# to be compatible with different torch versions
if torch.__version__.split('.')[1].isnumeric() and \
int(torch.__version__.split('.')[1]) < 7:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
        output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2
                               + floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, outputs, target):
""" Loss().compute(outputs, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# hidden-feature for cut-off-frequency
cut_f = outputs[1]
# generated signal
output = outputs[0]
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output, fft_p, frame_shift, frame_len)
y_stft = self._stft(target, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
# A norm on cut_f, which forces sinc-cut-off-frequency
# to be close to the U/V-decided value
# Experiments on CMU-arctic, ATR-F009, and VCTK don't use it
# by setting self.cutoff_w = 0.0
# However, just in case
loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f))
return loss
if __name__ == "__main__":
print("Definition of model")
| 40,407 | 38.810837 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of files for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncation, set truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features, which are []
test_output_dirs = [[]]
| 3,430 | 32.31068 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/model.py | #!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF
version: 1
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over BLSTM
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
We want to keep the length the same
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
#
# 1D dilated convolution that keeps the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is applied
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0)).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default: false)
Input data: (batchsize, length, feature_dim)
Output data: (batchsize, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
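# --- Usage sketch (added for illustration; not part of the original code) ---
# A minimal example of TimeInvFIRFilter: smooth a random 2-channel signal
# with a fixed 3-tap moving-average FIR. The sizes and coefficients below are
# arbitrary illustrative values; the module-level torch import is reused.
def _demo_time_inv_fir_filter():
    fir = TimeInvFIRFilter(feature_dim=2,
                           filter_coef=torch.tensor([1/3, 1/3, 1/3]))
    signal = torch.randn(1, 100, 2)  # (batchsize, length, feature_dim)
    smoothed = fir(signal)           # shape is preserved: (1, 100, 2)
    return smoothed.shape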
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize, length, dim)
    Output tensor: (batchsize, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
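# --- Usage sketch (added for illustration; not part of the original code) ---
# UpSampleLayer repeats each frame up_sampling_factor times along the time
# axis and optionally smooths the result with two moving-average layers.
# The sizes below are arbitrary illustrative values.
def _demo_up_sample_layer():
    upsamp = UpSampleLayer(feature_dim=80, up_sampling_factor=80,
                           smoothing=True)
    frames = torch.randn(1, 20, 80)  # (batchsize, frame_num, feature_dim)
    upsampled = upsamp(frames)       # (batchsize, frame_num * 80, feature_dim)
    return upsampled.shape           # torch.Size([1, 1600, 80])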
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size, \
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_size = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_size]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4,
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size,
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
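# --- Usage sketch (added for illustration; not part of the original code) ---
# One NeuralFilterBlock transforms a 1-dim excitation signal conditioned on
# the up-sampled spectral features. The lengths below are arbitrary
# illustrative values.
def _demo_neural_filter_block():
    block = NeuralFilterBlock(signal_size=1, hidden_size=64, conv_num=10)
    excitation = torch.randn(1, 16000, 1)  # (batchsize, length, signal_size)
    context = torch.randn(1, 16000, 64)    # (batchsize, length, hidden_size)
    return block(excitation, context).shape  # torch.Size([1, 16000, 1])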
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_{i=1}^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
            # This does not change the F0 of the sine, since
            # sin((x-1) * 2*pi) = sin(x * 2*pi)
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
            #       for voiced regions, std = self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
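# --- Usage sketch (added for illustration; not part of the original code) ---
# SineGen turns a sample-level F0 trajectory into sine waveforms for the
# fundamental and its overtones. A constant 200 Hz F0 is used here purely as
# an illustrative value; unvoiced steps would carry F0 = 0.
def _demo_sine_gen():
    sine_gen = SineGen(samp_rate=16000, harmonic_num=7)
    f0 = torch.ones(1, 16000, 1) * 200  # (batchsize, length, 1)
    sine_waves, uv, noise = sine_gen(f0)
    # sine_waves: (1, 16000, 8), fundamental + 7 overtones
    # uv: (1, 16000, 1), 1 for voiced and 0 for unvoiced steps
    return sine_waves.shape, uv.shape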
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModule(torch_nn.Module):
""" Conditiona module
Upsample and transform input features
CondModule(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0 = CondModule(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3):
super(CondModule, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, output_dim, 1, \
self.cnn_kernel_s)
self.l_upsamp = UpSampleLayer(self.output_dim, self.up_sample,
True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
spec = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
f0 = self.l_upsamp_F0(f0)
return spec, f0
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
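# --- Usage sketch (added for illustration; not part of the original code) ---
# SourceModuleHnNSF merges the harmonics produced by SineGen into a single
# excitation channel and also returns a noise source for the noise branch.
# The constant 200 Hz F0 is an arbitrary illustrative value.
def _demo_source_module():
    source = SourceModuleHnNSF(sampling_rate=16000, harmonic_num=7)
    f0_upsampled = torch.ones(1, 16000, 1) * 200
    sine_merge, noise, uv = source(f0_upsampled)
    return sine_merge.shape  # torch.Size([1, 16000, 1]), same for noise and uv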
# For Filter module
class FilterModuleHnNSF(torch_nn.Module):
""" Filter for Hn-NSF
FilterModuleHnNSF(signal_size, hidden_size, fir_coef,
block_num = 5,
kernel_size = 3, conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
fir_coef: list of FIR filter coeffs,
(low_pass_1, low_pass_2, high_pass_1, high_pass_2)
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
output = FilterModuleHnNSF(harmonic_source, noise_source, uv, context)
harmonic_source (batchsize, length, dim=1)
noise_source (batchsize, length, dim=1)
context (batchsize, length, dim)
uv (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, filter_coef, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.filter_coef = filter_coef
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# FIR filter groups
# lp_v: filter for voiced region, harmonic component
# lp_u: filter for unvoiced region, harmonic component
# hp_v: filter for voiced region, noise component
# hp_u: filter for unvoiced region, noise component
self.l_fir_lp_v = TimeInvFIRFilter(signal_size, filter_coef[0])
self.l_fir_lp_u = TimeInvFIRFilter(signal_size, filter_coef[1])
self.l_fir_hp_v = TimeInvFIRFilter(signal_size, filter_coef[2])
self.l_fir_hp_u = TimeInvFIRFilter(signal_size, filter_coef[3])
def forward(self, har_component, noi_component, condition_feat, uv):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, condition_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, condition_feat)
# harmonic + noise in time-domain
# assume uv is {0, 1}, produce a weight vector for voiced/unvoiced
# sigmoid is used to avoid {0, 1}, and uv is scaled to {-5, 5}
w_voi = torch.sigmoid((uv - 0.5) * 10)
w_unv = 1.0 - w_voi
har_v = self.l_fir_lp_v(har_component)
har_u = self.l_fir_lp_u(har_component)
noi_v = self.l_fir_hp_v(noi_component)
noi_u = self.l_fir_hp_u(noi_component)
output = (har_v + noi_v) * w_voi + (har_u + noi_u) * w_unv
return output
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_size = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtone in source
self.harmonic_num = 7
# fixed filter coefficients
# computed using PM algorithm
# (tool: http://t-filter.engineerjs.com)
#
# low-pass for harmonic-component in voiced region
# 16kHz, pass-band 0-5K, gain 1, ripple 5dB,
# stop-band 7-8k, gain 0, ripple -40dB)
lp_v = [0.08538414199291068, 0.04920229475534168,
-0.1470178606967731, 0.24737764593887432,
0.7103067853166558, 0.24737764593887432,
-0.1470178606967731, 0.04920229475534168,
0.08538414199291068]
        # low-pass for harmonic-component in unvoiced region
# 16kHz, pass-band 0-1K, gain 1, ripple 5dB,
# stop-band 3-8k, gain 0, ripple -40dB)
lp_u = [0.00936455546502, 0.0416254862901, 0.0878313219556,
0.146086321198, 0.192602581136, 0.211221591449,
0.192602581136, 0.146086321198, 0.0878313219556,
0.0416254862901, 0.00936455546502]
#
# high-pass for noise-component in voiced region
# 16kHz, pass-band 7-8K, gain 1, ripple 5dB,
# stop-band 0-5k, gain 0, ripple -40dB)
hp_v = [-0.00936455546502148, 0.04162548629009957,
-0.08783132195564508, 0.1460863211980122,
-0.19260258113649556, 0.21122159144894015,
-0.19260258113649556, 0.1460863211980122,
-0.08783132195564508, 0.04162548629009957,
-0.00936455546502148]
#
# high-pass for noise-component in unvoiced region
# 16kHz, pass-band 3-8K, gain 1, ripple 5dB,
# stop-band 0-1k, gain 0, ripple -40dB)
hp_u = [0.0853841419929, -0.0492022947553, -0.147017860697,
-0.247377645939, 0.710306785317, -0.247377645939,
-0.147017860697, -0.0492022947553, 0.0853841419929]
self.fir_filters = [torch.tensor(lp_v), torch.tensor(lp_u),
torch.tensor(hp_v), torch.tensor(hp_u)]
# the three modules
self.m_condition = CondModule(self.input_dim, self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s = self.cnn_kernel_size)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnNSF(self.output_dim,
self.hidden_dim,\
self.fir_filters,
self.filter_block_num, \
self.cnn_kernel_size, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
# assume x[:, :, -1] is F0, denormalize F0
f0 = x[:, :, -1:]
# normalize the data
feat = self.normalize_input(x)
# condition module
# features_for_filter_block, up-sampled F0
cond_feat, f0_upsamped = self.m_condition(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv flag
har_source, noi_source, uv = self.m_source(f0_upsamped)
# filter module (including FIR filtering)
# output signal
output = self.m_filter(har_source, noi_source, cond_feat, uv)
# output
return output.squeeze(-1)
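# --- Usage sketch (added for illustration; not part of the original code) ---
# A minimal end-to-end call of the hn-NSF Model. The SimpleNamespace below is
# a hypothetical stand-in for prj_conf and only carries the two fields that
# Model actually reads (input_reso and wav_samp_rate); args is unused when no
# mean/std statistics are given. Feature dimensions are illustrative only.
def _demo_hn_nsf_model():
    from types import SimpleNamespace
    conf = SimpleNamespace(input_reso=[80], wav_samp_rate=16000)
    model = Model(in_dim=81, out_dim=1, args=None, prj_conf=conf)
    feat = torch.randn(1, 10, 81)  # 10 frames of 80-dim mel-spec + 1-dim F0
    feat[:, :, -1] = 200           # last column is frame-level F0
    waveform = model(feat)         # (batchsize, 10 frames * 80 samples)
    return waveform.shape          # torch.Size([1, 800])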
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# FFT length
self.fft_n = [512, 128, 2048]
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# loss
self.loss = torch_nn.MSELoss()
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
        # to be compatible with different torch versions
        # (torch.stft accepts the return_complex argument only from v1.7 on)
        torch_ver = torch.__version__.split('.')
        if torch_ver[0].isnumeric() and torch_ver[1].isnumeric() and \
           (int(torch_ver[0]), int(torch_ver[1])) < (1, 7):
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
        output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2
                              + floor),
        i.e., the log of the squared magnitude (real^2 + imag^2) plus a floor
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, output, target):
""" Loss().compute(output, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output, fft_p, frame_shift, frame_len)
y_stft = self._stft(target, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
return loss
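# --- Usage sketch (added for illustration; not part of the original code) ---
# The Loss wrapper computes a multi-resolution log-spectral-amplitude MSE
# between the generated and the natural waveform. args is not used by
# Loss.__init__, so None is passed here purely for illustration.
def _demo_spectral_loss():
    loss_wrapper = Loss(args=None)
    generated = torch.randn(1, 16000)   # (batchsize, length)
    target = torch.randn(1, 16000, 1)   # (batchsize, length, 1) also accepted
    return loss_wrapper.compute(generated, target)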
if __name__ == "__main__":
print("Definition of model")
| 32,145 | 39.384422 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of files for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features; empty because no target features are needed at inference
test_output_dirs = [[]]
| 3,430 | 32.31068 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
This makes it more convenient to train the model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/model.py | #!/usr/bin/env python
"""
model.py for Cyclic-noise-NSF
version: 4
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over BLSTM
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
We want to keep the length the same
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
#
# 1D dilated convolution that keeps the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri,0,0), \
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
# permmute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
FIR Filtering is conducted on each dimension (channel)
independently, i.e., groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_trn=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_trn=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
        flag_trn: whether to learn the filter coefficients (default: false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_trn)
else:
print("TimeInvFIRFilter expects filter_coef as 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize, signal_length, filter_order = K)
Signal: (batchsize, signal_length, 1)
For batch 0:
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
        This method may not be efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
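# --- Usage sketch (added for illustration; not part of the original code) ---
# TimeVarFIRFilter applies a different FIR filter at every time step, e.g.,
# the time-varying sinc filters produced by SincFilter further below. The
# random coefficients here are purely illustrative.
def _demo_time_var_fir_filter():
    tv_fir = TimeVarFIRFilter()
    signal = torch.randn(1, 1000, 1)   # (batchsize, length, 1)
    coefs = torch.randn(1, 1000, 31)   # one 31-tap filter per time step
    return tv_fir(signal, coefs).shape # torch.Size([1, 1000, 1])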
class SignalsConv1d(torch_nn.Module):
""" Filtering input signal with time invariant filter
Note: FIRFilter conducted filtering given fixed FIR weight
SignalsConv1d convolves two signals
Note: this is based on torch.nn.functional.conv1d
"""
def __init__(self):
super(SignalsConv1d, self).__init__()
def forward(self, signal, system_ir):
""" output = forward(signal, system_ir)
signal: (batchsize, length1, dim)
system_ir: (length2, dim)
output: (batchsize, length1, dim)
"""
if signal.shape[-1] != system_ir.shape[-1]:
print("Error: SignalsConv1d expects shape:")
print("signal (batchsize, length1, dim)")
print("system_id (batchsize, length2, dim)")
print("But received signal: {:s}".format(str(signal.shape)))
print(" system_ir: {:s}".format(str(system_ir.shape)))
sys.exit(1)
padding_length = system_ir.shape[0] - 1
groups = signal.shape[-1]
# pad signal on the left
signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1),\
(padding_length, 0))
# prepare system impulse response as (dim, 1, length2)
# also flip the impulse response
ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \
dims=[2])
# convolute
output = torch_nn_func.conv1d(signal_pad, ir, groups=groups)
return output.permute(0, 2, 1)
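# --- Usage sketch (added for illustration; not part of the original code) ---
# SignalsConv1d convolves a signal with a time-invariant impulse response,
# e.g., a pulse train with a decayed-noise segment in CyclicNoiseGen_v1.
# The shapes below are arbitrary illustrative values.
def _demo_signals_conv1d():
    conv = SignalsConv1d()
    pulse_train = torch.randn(1, 4000, 1)    # (batchsize, length1, dim)
    impulse_response = torch.randn(200, 1)   # (length2, dim)
    return conv(pulse_train, impulse_response).shape  # torch.Size([1, 4000, 1])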
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
        self.order = self.half_k * 2 + 1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
        For each time step, n_index[0, t, :] is [-(M-1)/2, ... 0, (M-1)/2]
where,
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0 ..., half_order]
        x[:, :, self.half_k] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:])\
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
# temporary buffer of [-1^n] for gain norm in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
            # normalize the coefs so that the gain at 0 / pi is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize, length, dim)
    Output tensor: (batchsize, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size, for_har_component=True,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
self.for_har = for_har_component
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=True) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
if self.for_har:
# if this block is used for harmonic component
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
else:
# for noise component, no need to use skip-connection
output_signal = self.l_ff_3(tmp_hidden)
return output_signal
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_{i=1}^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
            # This does not change the F0 of the sine, since
            # sin((x-1) * 2*pi) = sin(x * 2*pi)
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
            #       for voiced regions, std = self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
class PulseGen(torch_nn.Module):
""" Definition of Pulse train generator
There are many ways to implement pulse generator.
Here, PulseGen is based on SinGen. For a perfect
"""
def __init__(self, samp_rate, pulse_amp = 0.1,
noise_std = 0.003, voiced_threshold = 0):
super(PulseGen, self).__init__()
self.pulse_amp = pulse_amp
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.noise_std = noise_std
self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0,\
sine_amp=self.pulse_amp, noise_std=0,\
voiced_threshold=self.voiced_threshold,\
flag_for_pulse=True)
def forward(self, f0):
""" Pulse train generator
pulse_train, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output pulse_train: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
        Note: self.l_sinegen doesn't make sure that the initial phase of
a voiced segment is np.pi, the first pulse in a voiced segment
may not be at the first time step within a voiced segment
"""
with torch.no_grad():
sine_wav, uv, noise = self.l_sinegen(f0)
# sine without additive noise
pure_sine = sine_wav - noise
# step t corresponds to a pulse if
# sine[t] > sine[t+1] & sine[t] > sine[t-1]
# & sine[t-1], sine[t+1], and sine[t] are voiced
# or
# sine[t] is voiced, sine[t-1] is unvoiced
# we use torch.roll to simulate sine[t+1] and sine[t-1]
sine_1 = torch.roll(pure_sine, shifts=1, dims=1)
uv_1 = torch.roll(uv, shifts=1, dims=1)
uv_1[:, 0, :] = 0
sine_2 = torch.roll(pure_sine, shifts=-1, dims=1)
uv_2 = torch.roll(uv, shifts=-1, dims=1)
uv_2[:, -1, :] = 0
loc = (pure_sine > sine_1) * (pure_sine > sine_2) \
* (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \
+ (uv_1 < 1) * (uv > 0)
# pulse train without noise
pulse_train = pure_sine * loc
# additive noise to pulse train
# note that noise from sinegen is zero in voiced regions
pulse_noise = torch.randn_like(pure_sine) * self.noise_std
# with additive noise on pulse, and unvoiced regions
pulse_train += pulse_noise * loc + pulse_noise * (1 - uv)
return pulse_train, sine_wav, uv, pulse_noise
class CyclicNoiseGen_v1(torch_nn.Module):
""" CyclicnoiseGen_v1
Cyclic noise with a single parameter of beta.
Pytorch v1 implementation assumes f_t is also fixed
"""
def __init__(self, samp_rate,
noise_std = 0.003, voiced_threshold = 0):
super(CyclicNoiseGen_v1, self).__init__()
self.samp_rate = samp_rate
self.noise_std = noise_std
self.voiced_threshold = voiced_threshold
self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0, \
noise_std=noise_std, \
voiced_threshold=voiced_threshold)
self.l_conv = SignalsConv1d()
def noise_decay(self, beta, f0mean):
""" decayed_noise = noise_decay(beta, f0mean)
decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate)
beta: (dim=1) or (batchsize=1, 1, dim=1)
f0mean (batchsize=1, 1, dim=1)
decayed_noise (batchsize=1, length, dim=1)
"""
with torch.no_grad():
# exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T
# truncate the noise when decayed by -40 dB
length = 4.6 * self.samp_rate / f0mean
length = length.int()
time_idx = torch.arange(0, length, device=beta.device)
time_idx = time_idx.unsqueeze(0).unsqueeze(2)
time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2])
noise = torch.randn(time_idx.shape, device=beta.device)
# due to Pytorch implementation, use f0_mean as the f0 factor
decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate)
return noise * self.noise_std * decay
def forward(self, f0s, beta):
""" Producde cyclic-noise
"""
# pulse train
pulse_train, sine_wav, uv, noise = self.l_pulse(f0s)
pure_pulse = pulse_train - noise * (1.0 - uv)
# decayed_noise (length, dim=1)
if (uv<1).all():
# all unvoiced
cyc_noise = torch.zeros_like(sine_wav)
else:
f0mean = f0s[uv>0].mean()
decayed_noise = self.noise_decay(beta, f0mean)[0, :, :]
# convolute
cyc_noise = self.l_conv(pure_pulse, decayed_noise)
        # add noise in unvoiced segments
cyc_noise = cyc_noise + noise * (1.0 - uv)
return cyc_noise, pulse_train, sine_wav, uv, noise
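# Hedged worked example (not part of the original code): noise_decay() keeps
# the decayed-noise template only up to n = 4.6 * T samples, where
# T = samp_rate / f0mean is the mean pitch period; at that point exp(-n / T)
# has fallen to about 1% (-40 dB). The values below are illustrative only.
def _demo_noise_decay_length(samp_rate=16000, f0mean=200.0):
    # number of waveform samples kept in the decayed-noise template
    length = int(4.6 * samp_rate / f0mean)
    # e.g. 4.6 * 16000 / 200 = 368 samples, i.e. 4.6 pitch periods (23 ms)
    return length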
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension,
up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
# the CNN layer (+1 dim for cut_off_frequence of sinc filter)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
self.output_dim + 1, \
dilation_s = 1, \
kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim + 1, \
self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
        # hidden_feat is between (-1, 1) after conv1d with tanh
        # hidden_feat * 0.2 is in (-0.2, 0.2); adding 0.3 gives (0.1, 0.5)
        # voiced frames:   (0.1, 0.5) + 0.4 = (0.5, 0.9)
        # unvoiced frames: (0.1, 0.5)
return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
        # Different from the paper, for simplicity, the output of conv1d
        # is fed to the neural filter blocks without concatenating F0
tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
spec = tmp[:, :, 0:self.output_dim]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(tmp[:, :, self.output_dim:], f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return spec, f0_upsamp, cut_f_smoothed
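# Hedged worked example (not part of the original code): the cut-off
# frequency produced by CondModuleHnSincNSF.get_cut_f for a few boundary
# values of hidden_feat and uv. cut_f appears to be a normalized frequency
# (1.0 = Nyquist), so cut_f * samp_rate / 2 gives an approximate value in Hz;
# the 16 kHz sampling rate below is an illustrative assumption.
def _demo_cut_f_mapping(samp_rate=16000):
    rows = []
    for hidden_feat, uv in [(-1.0, 0.0), (1.0, 0.0), (-1.0, 1.0), (1.0, 1.0)]:
        cut_f = hidden_feat * 0.2 + uv * 0.4 + 0.3
        # unvoiced frames stay in (0.1, 0.5), voiced frames in (0.5, 0.9)
        rows.append((uv, cut_f, cut_f * samp_rate / 2))
    return rows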
# For source module
class SourceModuleCycNoise_v1(torch_nn.Module):
""" SourceModuleCycNoise_v1
SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
noise_std: std of Gaussian noise (default: 0.003)
    voiced_threshold: threshold to set U/V given F0 (default: 0)
cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, \
noise_std=0.003, voiced_threshod=0):
super(SourceModuleCycNoise_v1, self).__init__()
self.sampling_rate = sampling_rate
self.noise_std = noise_std
self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std,
voiced_threshod)
def forward(self, f0_upsamped, beta):
"""
cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
# source for harmonic branch
cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta)
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.noise_std / 3
return cyc, noise, uv
# For Filter module
class FilterModuleCycNoiseNSF(torch_nn.Module):
""" Filter for cyclic noise nsf
FilterModuleCycNoiseNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
Usage:
out = FilterModuleCycNoiseNSF(har_source, noi_source, cut_f, context)
har_source: source for harmonic branch (batchsize, length, dim=1)
noi_source: source for noise branch (batchsize, length, dim=1)
cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
context: hidden features to be added (batchsize, length, dim)
out: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleCycNoiseNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, True, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, False, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
#
hidden_signals = []
for l_har_block in self.l_har_blocks:
hidden_signal = l_har_block(har_component, cond_feat)
hidden_signals.append(hidden_signal)
har_component = hidden_signal
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal, hidden_signals
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# sinc filter order (odd number)
self.sinc_order = 31
# number of harmonics for sine mask
        # note: cyclic-noise-nsf doesn't use harmonic overtones in the source
self.harmonic_num = 7
# beta parameter for cyclic-noise
self.beta = 0.870
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleCycNoise_v1(self.sampling_rate,
self.noise_std)
self.m_filter = FilterModuleCycNoiseNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# one additional module to generate sine mask
self.m_sinemask = SineGen(self.sampling_rate, self.harmonic_num)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, \
data_mean_std=None):
""" Load mean/std of input/output features
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
# assume x[:, :, -1] is F0, denormalize F0
f0 = x[:, :, -1:]
# normalize the input features data
feat = self.normalize_input(x)
# condition module
# features_for_filter_block, up-sampled F0, cut-off-frequency
cond_feat, f0_upsamp, cut_f = self.m_cond(feat, f0)
# source module
# here we assume beta is fixed
beta = torch.ones(1, 1, 1, device=f0_upsamp.device) * self.beta
# harmonic-source signal, noise-source signal, uv flag
har_source, noi_source, uv = self.m_source(f0_upsamp, beta)
# neural filter module (including sinc-based FIR filtering)
# output signal, hidden signals
output, hidden = self.m_filter(har_source, noi_source, \
cond_feat, cut_f)
if self.training:
# hidden signals shape as (batchsize=1, length)
hidden = [x.squeeze(-1) for x in hidden]
# sine for masking
with torch.no_grad():
sine_mask, uv, noise = self.m_sinemask(f0_upsamp)
sine_mask = (sine_mask - noise).mean(axis=-1)
# return
return [output.squeeze(-1), hidden, sine_mask]
else:
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# FFT length
self.fft_n = [512, 128, 2048]
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# floor to determine the frames to be masked
self.mask_power_threshold = 0.0000001
# loss function
self.loss = torch_nn.MSELoss()
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
# to be compatible with different torch versions
if torch.__version__.split('.')[1].isnumeric() and \
int(torch.__version__.split('.')[1]) < 7:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
        output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2
                              + floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def _mask_stft(self, mask_signal, fft_p, frame_shift, frame_len):
"""
calculate the masking weights from input mask signal (sine)
"""
# power spectrum of the masking signal (sine signal)
x_stft = self._stft(mask_signal, fft_p, frame_shift, frame_len)
# x_pow (batchsize, fft_p/2+1, frame_num)
x_stft_pow = torch.norm(x_stft, 2, -1).pow(2)
# get the normalizing weight for each frame
# x_flag (batchsize, frame_num)
x_flag = x_stft_pow.mean(axis=1) > self.mask_power_threshold
# x_stft_max (batchsize, frame_num)
x_stft_max = x_stft_pow.max(axis=1)[0]
x_stft_max[~x_flag] = 1.0
# x_stft_weight (batchsize, frame_num)
x_stft_weight = 1 / x_stft_max * x_flag
# normalizing the mask
# mask_normed (batchsize, fft_p/2+1, frame_num, 2)
mask_normed = torch.ones_like(x_stft)
# normalize the mask, so that maximum mask weight = 1
# mask_normed[:, :, :, 0] is used to mask the real-part
        # of a spectrum
# mask_normed[:, :, :, 1] is used to mask the imaginary-part
        # of a spectrum
mask_normed[:, :, :, 0] = x_stft_pow * x_stft_weight.unsqueeze(1)
mask_normed[:, :, :, 1] = mask_normed[:, :, :, 0]
return mask_normed
def stft_amp(self, signal, fft_p, frame_shift, frame_len, mask=None):
""" compute STFT log amplitude
signal: (batchsize, length)
output: (batchsize, fft_p/2+1, frame_num)
mask: (batchsize, fft_p/2+1, frame_num, 2)
"""
x_stft = self._stft(signal, fft_p, frame_shift, frame_len)
if mask is None:
x_sp_amp = self._amp(x_stft)
else:
# apply mask if necessary
# mask[:, :, :, 0] is used to mask the real-part
            # of a spectrum
# mask[:, :, :, 1] is used to mask the imaginary-part
            # of a spectrum
x_sp_amp = self._amp(x_stft * mask)
return x_sp_amp
def compute(self, outputs, target):
""" Loss().compute(output, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# generated signal
output = outputs[0]
# hidden signals from each filter block in harmonic branch
hiddens = outputs[1]
# sine mask signal
sinemask = outputs[2]
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss over target and output
loss = 0
for frame_s, frame_l, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
# between generated signal and target
gen_sp_amp = self.stft_amp(output, fft_p, frame_s, frame_l)
tar_sp_amp = self.stft_amp(target, fft_p, frame_s, frame_l)
loss += self.loss(gen_sp_amp, tar_sp_amp)
# masked spectral loss between hidden signals & target
with torch.no_grad():
# produce stft of sine mask
mask = self._mask_stft(sinemask, fft_p, frame_s, frame_l)
# apply mask to target signal
tar_sp_masked_amp = self.stft_amp(target, fft_p, \
frame_s, frame_l, mask)
for hidden in hiddens:
h_sp_masked_amp = self.stft_amp(hidden, fft_p, frame_s, \
frame_l, mask)
loss += self.loss(h_sp_masked_amp, tar_sp_masked_amp)
# done
return loss
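# Hedged usage sketch (not part of the original code): tensor shapes that
# Loss.compute() expects during training. The 1 s of random audio and the
# five hidden signals (= filter_block_num) are illustrative assumptions;
# 'args' is not used by Loss.__init__, so None is passed here.
def _demo_loss_compute():
    import torch
    loss_fn = Loss(None)
    wav_len = 16000
    output = torch.randn(1, wav_len)                       # generated wave
    hiddens = [torch.randn(1, wav_len) for _ in range(5)]  # per-block signals
    sine_mask = torch.randn(1, wav_len)                    # sine mask signal
    target = torch.randn(1, wav_len, 1)                    # natural wave
    return loss_fn.compute([output, hiddens, sine_mask], target)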
if __name__ == "__main__":
print("Definition of model")
| 49,402 | 38.937753 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/cyc-noise-nsf-4/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of files for the validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features, which are []
test_output_dirs = [[]]
| 3,430 | 32.31068 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-hifigan/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager_GAN as nii_nn_wrapper_GAN
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler,
'pin_memory': True}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
inout_trans_fns = prj_conf.input_output_trans_fn \
if hasattr(prj_conf, 'input_output_trans_fn') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
else:
val_set = None
# initialize the model and loss function
model_G = prj_model.ModelGenerator(
trn_set.get_in_dim(), trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
model_D = prj_model.ModelDiscriminator(
trn_set.get_in_dim(), trn_set.get_out_dim(),
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = None
# initialize the optimizer
optimizer_G_wrap = nii_op_wrapper.OptimizerWrapper(model_G, args)
optimizer_D_wrap = nii_op_wrapper.OptimizerWrapper(model_D, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint_G = None
checkpoint_D = None
else:
tmp_str = args.trained_model.split(",")
checkpoint_G = torch.load(tmp_str[0])
if len(tmp_str) > 1:
checkpoint_D = torch.load(tmp_str[1])
else:
checkpoint_D = None
# start training
nii_nn_wrapper_GAN.f_train_wrapper_GAN(
args, model_G, model_D,
loss_wrapper, device,
optimizer_G_wrap, optimizer_D_wrap,
trn_set, val_set,
checkpoint_G, checkpoint_D)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
        in_trans_fns = prj_conf.test_input_trans_fns \
            if hasattr(prj_conf, 'test_input_trans_fns') else None
        out_trans_fns = prj_conf.test_output_trans_fns \
            if hasattr(prj_conf, 'test_output_trans_fns') else None
        inout_trans_fns = prj_conf.test_input_output_trans_fn \
            if hasattr(prj_conf, 'test_input_output_trans_fn') \
            else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq = None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns,
inoutput_augment_func = inout_trans_fns)
# initialize model
model = prj_model.ModelGenerator(
test_set.get_in_dim(), test_set.get_out_dim(), args, prj_conf)
if args.trained_model == "":
print("Please provide ---trained-model")
sys.exit(1)
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(
args, model, device, test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 8,338 | 35.574561 | 79 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-hifigan/model.py | #!/usr/bin/env python
"""
model.py for hn-nsf + hifigan discriminator
HifiGAN part is adopted from https://github.com/jik876/hifi-gan
HiFi-GAN: Generative Adversarial Networks for Efficient and
High Fidelity Speech Synthesis
By Jungil Kong, Jaehyeon Kim, Jaekyoung Bae
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2021, Xin Wang"
#########
## Loss definition
#########
class LossAuxGen():
""" Wrapper to define loss function
"""
def __init__(self):
""" Multi-resolution STFT loss
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# fft length
self.fft_n = [512, 128, 2048]
# window type in stft
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# loss function
self.loss = torch_nn.L1Loss()
# weight for this loss
self.loss_weight = 45
# a buffer to store the window coefficients
self.win_buf = {}
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" output = _stft(signal, fft_p, frame_shift, frame_len)
wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
input
-----
Signal, tensor, (batchsize, length)
fft_p: int, FFT points
frame_shift: int, frame shift, in number of waveform points
frame_len: int, frame length, in number of waveform points
output
------
Output, tensor (batchsize, fft_p/2+1, frame_num, 2)
"""
# buffer to store the window coefficients
if not frame_len in self.win_buf:
win_coef = self.win(frame_len, dtype=signal.dtype,
device=signal.device)
self.win_buf[frame_len] = win_coef
win_coef = self.win_buf[frame_len]
# to be compatible with different torch versions
if torch.__version__.split('.')[1].isnumeric() and \
int(torch.__version__.split('.')[1]) < 7:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=win_coef,
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=win_coef,
onesided=True, pad_mode="constant",
return_complex=False)
def _amp(self, x):
""" otuput = _amp(stft)
compute STFT amplitude
input
-----
x_stft: tensor (batchsize, fft_p/2+1, frame_num, 2)
output:
output: (batchsize, fft_p/2+1, frame_num)
        Note that output[x, y, z] =
            log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2 + floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, output, target):
""" loss = compute(output, target)
input
-----
output: tensor, output signal from a model, (batch, length, 1)
target: tensor, natural target signal, (batch, length, 1)
output
------
loss: scalar,
"""
        if output.ndim == 3:
            output_tmp = output.squeeze(-1)
        else:
            output_tmp = output
        if target.ndim == 3:
            target_tmp = target.squeeze(-1)
        else:
            target_tmp = target
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output_tmp, fft_p, frame_shift, frame_len)
y_stft = self._stft(target_tmp, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
return loss * self.loss_weight
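# Hedged usage sketch (not part of the original code): the auxiliary
# multi-resolution STFT loss compares waveforms of shape (batch, length, 1).
# The batch of two 1 s random waveforms below is an illustrative assumption.
def _demo_aux_loss():
    import torch
    aux_loss = LossAuxGen()
    generated = torch.randn(2, 16000, 1)
    natural = torch.randn(2, 16000, 1)
    # scalar tensor, already scaled by loss_weight (45)
    return aux_loss.compute(generated, natural)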
#####
## Model Generator definition
#####
class BLSTMLayer(torch_nn.Module):
""" Wrapper over BLSTM
Assume hidden layer = 1
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True, batch_first=True)
def forward(self, x):
"""output = fowrard(x)
input
-----
x: tensor (batchsize=1, length, dim_in)
output
------
Output: tensor, (batchsize=1, length, dim_out)
"""
blstm_data, _ = self.l_blstm(x)
return blstm_data
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
        # permute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
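# Hedged usage sketch (not part of the original code): Conv1dKeepLength pads
# so that the output length matches the input length in both the causal and
# the centered configuration. The sizes below are illustrative assumptions.
def _demo_conv1d_keep_length():
    import torch
    x = torch.randn(1, 100, 8)                       # (batch, length, dim_in)
    causal = Conv1dKeepLength(8, 16, dilation_s=2, kernel_s=3, causal=True)
    centered = Conv1dKeepLength(8, 16, dilation_s=2, kernel_s=3, causal=False)
    assert causal(x).shape == (1, 100, 16)
    assert centered(x).shape == (1, 100, 16)
    return causal(x), centered(x)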
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
For batch 0:
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
For n in [1, sequence_length):
output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
        This method may not be efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
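# Hedged usage sketch (not part of the original code): time-variant FIR
# filtering applies a different coefficient vector at every time step.
# The filter order and signal length below are illustrative assumptions.
def _demo_time_var_fir():
    import torch
    tv_fir = TimeVarFIRFilter()
    signal = torch.randn(1, 200, 1)     # (batch, length, 1)
    coef = torch.randn(1, 200, 10)      # one order-10 coefficient set per step
    return tv_fir(signal, coef)         # (1, 200, 1)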
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
self.order = self.half_k * 2 +1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order]
x[:, :, self.half_order] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
# temporary buffer of [-1^n] for gain norm in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
            # normalize the coefs so that the gain at 0/pi is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
    Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute back to (batchsize=1, length, dim)
        # and apply the two moving-average smoothing layers
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([1/len(self.l_convs)]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
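# Hedged usage sketch (not part of the original code): a neural filter block
# transforms a 1-d signal conditioned on hidden features of size hidden_size.
# The 400-sample length below is an illustrative assumption.
def _demo_neural_filter_block():
    import torch
    block = NeuralFilterBlock(signal_size=1, hidden_size=64)
    signal = torch.randn(1, 400, 1)      # excitation signal
    context = torch.randn(1, 400, 64)    # conditioning features
    return block(signal, context)        # (1, 400, 1), residual w.r.t. signal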
#
# Sine waveform generator
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
        # because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_{i=1}^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x *2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
            # If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: for unvoiced frames, std = self.sine_amp / 3, so that
            #        the max noise value is close to self.sine_amp;
            #        for voiced frames, std = self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
# the CNN layer (+1 dim for cut_off_frequence of sinc filter)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
self.output_dim, \
dilation_s = 1, \
kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim, \
self.up_sample, True)
# separate layer for up-sampling normalized F0 values
self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
        # hidden_feat is between (-1, 1) after conv1d with tanh
        # hidden_feat * 0.2 is in (-0.2, 0.2); adding 0.3 gives (0.1, 0.5)
        # voiced frames:   (0.1, 0.5) + 0.4 = (0.5, 0.9)
        # unvoiced frames: (0.1, 0.5)
return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
        # concatenate normed F0 with hidden spectral features
context = torch.cat((tmp[:, :, 0:self.output_dim-1], \
self.l_upsamp_f0_hi(feature[:, :, -1:])), \
dim=2)
# hidden feature for cut-off frequency
hidden_cut_f = tmp[:, :, self.output_dim-1:]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return context, f0_upsamp, cut_f_smoothed, hidden_cut_f
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
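# Hedged usage sketch (not part of the original code): the source module maps
# an up-sampled F0 track to one merged harmonic excitation plus a noise
# source. The sampling rate and the 220 Hz F0 are illustrative assumptions.
def _demo_source_module():
    import torch
    src = SourceModuleHnNSF(sampling_rate=16000, harmonic_num=7)
    f0_upsampled = torch.full((1, 8000, 1), 220.0)   # constant, fully voiced
    sine_merge, noise, uv = src(f0_upsampled)
    # sine_merge, noise, uv: all (1, 8000, 1)
    return sine_merge, noise, uv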
# For Filter module
class FilterModuleHnSincNSF(torch_nn.Module):
""" Filter for Hn-sinc-NSF
FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
Usage:
output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context)
har_source: source for harmonic branch (batchsize, length, dim=1)
noi_source: source for noise branch (batchsize, length, dim=1)
cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
context: hidden features to be added (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnSincNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, cond_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal
class ModelGenerator(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(ModelGenerator, self).__init__()
########## basic config ########
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
###############################
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtones in source
self.harmonic_num = 7
# order of sinc-windowed-FIR-filter
self.sinc_order = 31
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnSincNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# loss function on spectra
self.m_aux_loss = LossAuxGen()
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
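# Note (added): when no data_mean_std is provided, the zero-mean / unit-std
# fallback above makes normalize_input and normalize_target identity
# mappings, so the raw features are used unchanged.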
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
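# Sanity check (illustrative, added): normalize_target and denormalize_output
# round-trip up to floating-point error, i.e.
#   self.denormalize_output(self.normalize_target(y)) ~= y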
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
# assume x[:, :, -1] is F0; take the raw F0 (in Hz) before input normalization
f0 = x[:, :, -1:]
# normalize the input features data
feat = self.normalize_input(x)
# condition module
# feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc,
# hidden-feature-for-cut-off-f
cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv
har_source, noi_source, uv = self.m_source(f0_upsamped)
# neural filter module (including sinc-based FIR filtering)
# output
output = self.m_filter(har_source, noi_source, cond_feat, cut_f)
return output
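# Data-flow sketch (added; shapes are an assumption based on the comments and
# docstrings above, taking upsamp_rate = 80 for 16 kHz audio with 5 ms frames):
#   feat (B, T, input_dim) --m_cond--> cond_feat (B, T*80, 64), cut_f (B, T*80, 1)
#   f0 (B, T, 1)           --m_cond--> f0_upsamped (B, T*80, 1)
#   f0_upsamped            --m_source--> har_source, noi_source (B, T*80, 1)
#   sources + cond_feat + cut_f --m_filter--> output waveform (B, T*80, 1)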
def loss_aux(self, nat_wav, gen_tuple, data_in):
return self.m_aux_loss.compute(gen_tuple, nat_wav)
#########
## Model Discriminator definition
#########
def get_padding(kernel_size, dilation=1):
"""Function to compute the padding length for CNN layers
"""
# L_out = (L_in + 2*pad - dila * (ker - 1) - 1) // stride + 1
# stride -> 1
# L_out = L_in + 2*pad - dila * (ker - 1)
# L_out == L_in ->
# 2 * pad = dila * (ker - 1)
return int((kernel_size*dilation - dilation)/2)
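# Worked example (added): for stride-1 convolutions, "same"-length output needs
# pad = dilation * (kernel_size - 1) / 2, so get_padding(5, 1) == 2 and
# get_padding(3, 2) == 2.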
class DiscriminatorP(torch_nn.Module):
def __init__(self, period,
kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.leaky_relu_slope = 0.1
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
# note: the convolutions below hard-code get_padding(5, 1) for their padding,
# which preserves the time length only when kernel_size keeps its default of 5
self.convs = torch_nn.ModuleList([
norm_f(
torch_nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1),
padding=(get_padding(5, 1), 0))),
norm_f(
torch_nn.Conv2d(1024, 1024, (kernel_size, 1), 1,
padding=(2, 0))),
])
self.conv_post = norm_f(
torch_nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
return
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = torch_nn_func.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
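# Shape sketch (added; numbers are illustrative): for period 3 and an input of
# shape (B, 1, 100), the waveform is reflect-padded to length 102 and viewed as
# (B, 1, 34, 3), so the (kernel_size, 1) convolutions above slide over samples
# that are one period apart.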
class MultiPeriodDiscriminator(torch_nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = torch_nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
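# Usage sketch (added; tensor names are placeholders): for natural and
# generated waveforms of shape (B, 1, T),
#   mpd = MultiPeriodDiscriminator()
#   y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(nat_wav, gen_wav)
# returns one score tensor and one feature-map list per period (2, 3, 5, 7, 11).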
class DiscriminatorS(torch_nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
self.leaky_relu_slope = 0.1
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = torch_nn.ModuleList([
norm_f(
torch_nn.Conv1d(1, 128, 15, 1, padding=7)),
norm_f(
torch_nn.Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(
torch_nn.Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(
torch_nn.Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(torch_nn.Conv1d(1024, 1, 3, 1, padding=1))
return
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = torch_nn_func.leaky_relu(x, self.leaky_relu_slope)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
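# Note (added): the strides 1, 2, 2, 4, 4, 1, 1 above downsample the waveform
# by a factor of 64 before the final 1-channel projection, so each output score
# summarizes a long temporal context.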
class MultiScaleDiscriminator(torch_nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = torch_nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = torch_nn.ModuleList([
torch_nn.AvgPool1d(4, 2, padding=2),
torch_nn.AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
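# Note (added): the first DiscriminatorS (with spectral norm) sees the raw
# waveform, while the second and third see the signal average-pooled once and
# twice (roughly 2x and 4x downsampled), which provides the multi-scale view.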
class ModelDiscriminator(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(ModelDiscriminator, self).__init__()
self.m_mpd = MultiPeriodDiscriminator()
self.m_msd = MultiScaleDiscriminator()
# done
return
def _feature_loss(self, fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def _discriminator_loss(self, disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
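# Worked example (added; numbers are made up): for a single scale with scalar
# scores dr = 0.9 (real) and dg = 0.2 (generated), the LSGAN terms are
# r_loss = (1 - 0.9)**2 = 0.01 and g_loss = 0.2**2 = 0.04, so this scale
# contributes 0.05 to the total discriminator loss.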
def _generator_loss(self, disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
def loss_for_D(self, nat_wav, gen_wav_detached, input_feat):
# gen_wav has been detached
nat_wav_tmp = nat_wav.permute(0, 2, 1)
gen_wav_tmp = gen_wav_detached.permute(0, 2, 1)
# MPD
y_df_hat_r, y_df_hat_g, _, _ = self.m_mpd(nat_wav_tmp, gen_wav_tmp)
loss_disc_f, _, _ = self._discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = self.m_msd(nat_wav_tmp, gen_wav_tmp)
loss_disc_s, _, _ = self._discriminator_loss(y_ds_hat_r, y_ds_hat_g)
return loss_disc_f + loss_disc_s
def loss_for_G(self, nat_wav, gen_wav, input_feat):
nat_wav_tmp = nat_wav.permute(0, 2, 1)
gen_wav_tmp = gen_wav.permute(0, 2, 1)
# MPD
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = self.m_mpd(nat_wav_tmp,
gen_wav_tmp)
# MSD
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = self.m_msd(nat_wav_tmp,
gen_wav_tmp)
loss_fm_f = self._feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = self._feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, _ = self._generator_loss(y_df_hat_g)
loss_gen_s, _ = self._generator_loss(y_ds_hat_g)
return loss_fm_f + loss_fm_s + loss_gen_f + loss_gen_s
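# Training-step sketch (added; variable names are placeholders and the exact
# wiring is handled by the nn_manager scripts, not shown here):
#   d_loss = discriminator.loss_for_D(nat_wav, gen_wav.detach(), feat)
#   g_loss = discriminator.loss_for_G(nat_wav, gen_wav, feat) \
#            + generator.loss_aux(nat_wav, gen_tuple, feat)
# with separate optimizers stepping on d_loss and g_loss in alternation.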
if __name__ == "__main__":
print("Definition of model")
| 48,552 | 37.170597 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-9/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
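# Example invocation (added; the exact flag spellings are defined by
# core_scripts.config_parse.arg_parse and may differ from this sketch):
#   python main.py --module-config config --module-model model --seed 1
# for training, and something like
#   python main.py --inference --trained-model trained_network.pt
# for waveform generation.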
| 7,819 | 35.886792 | 80 | py |