repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---|
baconian-project | baconian-project-master/baconian/algo/dynamics/__init__.py | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/gaussian_process_dynamiocs_model.py | from baconian.core.core import EnvSpec
from baconian.algo.dynamics.dynamics_model import TrainableDyanmicsModel, LocalDyanmicsModel
import gpflow
import numpy as np
from baconian.common.sampler.sample_data import TransitionData
from baconian.algo.dynamics.third_party.mgpr import MGPR
import tensorflow as tf
from baconian.tf.util import *
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.common.data_pre_processing import DataScaler
class GaussianProcessDyanmicsModel(LocalDyanmicsModel, TrainableDyanmicsModel):
kernel_type_dict = {
'RBF': (gpflow.kernels.RBF, dict(ARD=True))
}
"""
Dynamics approximated by multivariate gaussian process model based GPflow package.
Mostly refer the implementation of PILCO repo in https://github.com/nrontsis/PILCO
"""
def __init__(self, env_spec: EnvSpec, batch_data, init_state=None,
name_scope='gp_dynamics_model', name='gp_dynamics_model',
gp_kernel_type='RBF'):
if gp_kernel_type not in self.kernel_type_dict.keys():
raise TypeError(
                'Unsupported kernel {}, choose from {}'.format(gp_kernel_type, list(self.kernel_type_dict.keys())))
parameters = ParametersWithTensorflowVariable(tf_var_list=[],
rest_parameters=dict(),
name='{}_param'.format(name),
require_snapshot=False)
super().__init__(env_spec=env_spec, parameters=parameters, init_state=init_state, name=name)
self.name_scope = name_scope
state_action_data = np.hstack((batch_data.state_set, batch_data.action_set))
delta_state_data = batch_data.new_state_set - batch_data.state_set
with tf.variable_scope(self.name_scope):
self.mgpr_model = MGPR(name='mgpr', action_dim=env_spec.flat_action_dim,
x=state_action_data, y=delta_state_data,
state_dim=env_spec.flat_obs_dim)
var_list = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES,
scope=self.name_scope)
self.parameters.set_tf_var_list(tf_var_list=sorted(list(set(var_list)), key=lambda x: x.name))
def init(self):
super().init()
def _state_transit(self, state, action, required_var=False, **kwargs):
deltas, vars = self.mgpr_model.predict(x=np.expand_dims(np.concatenate([state, action], axis=0), axis=0))
if required_var is True:
return np.squeeze(deltas) + state, np.squeeze(vars)
else:
return np.squeeze(deltas) + state
def copy_from(self, obj) -> bool:
raise NotImplementedError
def make_copy(self):
raise NotImplementedError
    def train(self, restart=1, batch_data: TransitionData = None, **kwargs):
if batch_data:
state_action_data = np.hstack((batch_data.state_set, batch_data.action_set))
delta_state_data = batch_data.new_state_set - batch_data.state_set
self.mgpr_model.set_XY(X=state_action_data, Y=delta_state_data)
self.mgpr_model.optimize(restarts=restart)
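# A minimal usage sketch (illustrative, not part of the original module): assumes
# `env_spec` is an EnvSpec and `batch` is a TransitionData filled with
# (state, action, new_state) samples from the target environment.
def _gp_dynamics_usage_sketch(env_spec, batch, state, action):
    model = GaussianProcessDyanmicsModel(env_spec=env_spec, batch_data=batch)
    model.init()  # initialize the tf variables created by MGPR
    model.train(restart=2)  # L-BFGS fit with 2 random restarts per output GP
    # one-step prediction: the model predicts the state delta and adds the
    # current state back (see _state_transit above)
    return model._state_transit(state=state, action=action)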
| 3,282 | 48 | 117 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/third_party/mgpr.py | """
Code from https://github.com/nrontsis/PILCO
"""
import tensorflow as tf
import gpflow
import numpy as np
from typeguard import typechecked
float_type = gpflow.settings.dtypes.float_type
def randomize(model):
mean = 1
sigma = 0.01
model.kern.lengthscales.assign(
mean + sigma * np.random.normal(size=model.kern.lengthscales.shape))
model.kern.variance.assign(
mean + sigma * np.random.normal(size=model.kern.variance.shape))
if model.likelihood.variance.trainable:
model.likelihood.variance.assign(
mean + sigma * np.random.normal())
class MGPR(gpflow.Parameterized):
def __init__(self, action_dim, state_dim, x, y, name=None):
super(MGPR, self).__init__(name)
self.num_outputs = state_dim
self.num_dims = action_dim + state_dim
self.optimizers = []
self.models = []
self._create_models(X=x, Y=y)
def _create_models(self, X: np.ndarray, Y: np.ndarray):
for i in range(self.num_outputs):
kern = gpflow.kernels.RBF(input_dim=X.shape[1], ARD=True)
            kern.lengthscales.prior = gpflow.priors.Gamma(1, 10)  # priors have to be included
            kern.variance.prior = gpflow.priors.Gamma(1.5, 2)  # before the model gets compiled
# TODO: Maybe fix noise for better conditioning
model = gpflow.models.GPR(X, Y[:, i:i + 1], kern, name="{}_{}".format(self.name, i))
model.clear()
model.compile()
self.models.append(model)
def set_XY(self, X: np.ndarray, Y: np.ndarray):
for i in range(len(self.models)):
self.models[i].X = X
self.models[i].Y = Y[:, i:i + 1]
def optimize(self, restarts=1):
if len(self.optimizers) == 0: # This is the first call to optimize();
for model in self.models:
                # Create a gpflow.train.ScipyOptimizer object for every model embedded in mgpr
optimizer = gpflow.train.ScipyOptimizer(method='L-BFGS-B')
optimizer.minimize(model)
self.optimizers.append(optimizer)
restarts -= 1
for model, optimizer in zip(self.models, self.optimizers):
session = optimizer._model.enquire_session(None)
best_parameters = model.read_values(session=session)
best_likelihood = model.compute_log_likelihood()
for restart in range(restarts):
randomize(model)
optimizer._optimizer.minimize(session=session,
feed_dict=optimizer._gen_feed_dict(optimizer._model, None),
step_callback=None)
likelihood = model.compute_log_likelihood()
if likelihood > best_likelihood:
best_parameters = model.read_values(session=session)
best_likelihood = likelihood
model.assign(best_parameters)
def predict(self, x):
means = []
vars = []
for i in range(self.num_outputs):
mean, var = self.models[i].predict_f(x)
means.append(mean)
vars.append(var)
return np.array(means), np.array(vars)
def predict_on_noisy_inputs(self, m, s):
iK, beta = self.calculate_factorizations()
return self.predict_given_factorizations(m, s, iK, beta)
def calculate_factorizations(self):
K = self.K(self.X)
batched_eye = tf.eye(tf.shape(self.X)[0], batch_shape=[self.num_outputs], dtype=float_type)
L = tf.cholesky(K + self.noise[:, None, None] * batched_eye)
iK = tf.cholesky_solve(L, batched_eye)
Y_ = tf.transpose(self.Y)[:, :, None]
# Why do we transpose Y? Maybe we need to change the definition of self.Y() or beta?
beta = tf.cholesky_solve(L, Y_)[:, :, 0]
return iK, beta
def predict_given_factorizations(self, m, s, iK, beta):
"""
Approximate GP regression at noisy inputs via moment matching
IN: mean (m) (row vector) and (s) variance of the state
OUT: mean (M) (row vector), variance (S) of the action
             and inv(s) * input-output covariance
"""
s = tf.tile(s[None, None, :, :], [self.num_outputs, self.num_outputs, 1, 1])
inp = tf.tile(self.centralized_input(m)[None, :, :], [self.num_outputs, 1, 1])
# Calculate M and V: mean and inv(s) times input-output covariance
iL = tf.matrix_diag(1 / self.lengthscales)
iN = inp @ iL
B = iL @ s[0, ...] @ iL + tf.eye(self.num_dims, dtype=float_type)
# Redefine iN as in^T and t --> t^T
        # B is symmetric so it's the same
t = tf.linalg.transpose(
tf.matrix_solve(B, tf.linalg.transpose(iN), adjoint=True),
)
lb = tf.exp(-tf.reduce_sum(iN * t, -1) / 2) * beta
tiL = t @ iL
c = self.variance / tf.sqrt(tf.linalg.det(B))
M = (tf.reduce_sum(lb, -1) * c)[:, None]
V = tf.matmul(tiL, lb[:, :, None], adjoint_a=True)[..., 0] * c[:, None]
# Calculate S: Predictive Covariance
R = s @ tf.matrix_diag(
1 / tf.square(self.lengthscales[None, :, :]) +
1 / tf.square(self.lengthscales[:, None, :])
) + tf.eye(self.num_dims, dtype=float_type)
# TODO: change this block according to the PR of tensorflow. Maybe move it into a function?
X = inp[None, :, :, :] / tf.square(self.lengthscales[:, None, None, :])
X2 = -inp[:, None, :, :] / tf.square(self.lengthscales[None, :, None, :])
Q = tf.matrix_solve(R, s) / 2
Xs = tf.reduce_sum(X @ Q * X, -1)
X2s = tf.reduce_sum(X2 @ Q * X2, -1)
maha = -2 * tf.matmul(X @ Q, X2, adjoint_b=True) + \
Xs[:, :, :, None] + X2s[:, :, None, :]
#
k = tf.log(self.variance)[:, None] - \
tf.reduce_sum(tf.square(iN), -1) / 2
L = tf.exp(k[:, None, :, None] + k[None, :, None, :] + maha)
S = (tf.tile(beta[:, None, None, :], [1, self.num_outputs, 1, 1])
@ L @
tf.tile(beta[None, :, :, None], [self.num_outputs, 1, 1, 1])
)[:, :, 0, 0]
diagL = tf.transpose(tf.linalg.diag_part(tf.transpose(L)))
S = S - tf.diag(tf.reduce_sum(tf.multiply(iK, diagL), [1, 2]))
S = S / tf.sqrt(tf.linalg.det(R))
S = S + tf.diag(self.variance)
S = S - M @ tf.transpose(M)
return tf.transpose(M), S, tf.transpose(V)
def centralized_input(self, m):
return self.X - m
def K(self, X1, X2=None):
return tf.stack(
[model.kern.K(X1, X2) for model in self.models]
)
@property
def Y(self):
return tf.concat(
[model.Y.parameter_tensor for model in self.models],
axis=1
)
@property
def X(self):
return self.models[0].X.parameter_tensor
@property
def lengthscales(self):
return tf.stack(
[model.kern.lengthscales.constrained_tensor for model in self.models]
)
@property
def variance(self):
return tf.stack(
[model.kern.variance.constrained_tensor for model in self.models]
)
@property
def noise(self):
return tf.stack(
[model.likelihood.variance.constrained_tensor for model in self.models]
)
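# A minimal sketch (illustrative, shapes assumed from the code above): MGPR fits
# one independent GPR per output dimension, so predict() returns stacked
# per-dimension means and variances.
def _mgpr_usage_sketch(state_dim=2, action_dim=1, n=20):
    x = np.random.randn(n, state_dim + action_dim)
    y = np.random.randn(n, state_dim)
    model = MGPR(action_dim=action_dim, state_dim=state_dim, x=x, y=y, name='demo_mgpr')
    model.optimize(restarts=1)
    means, variances = model.predict(x[:5])  # each of shape (state_dim, 5, 1)
    return means, variances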
| 7,423 | 37.071795 | 105 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/third_party/gmm.py | """ This file defines a Gaussian mixture model class. """
import logging
import numpy as np
import scipy.linalg
from copy import deepcopy
LOGGER = logging.getLogger(__name__)
def logsum(vec, axis=0, keepdims=True):
    """Numerically stable log-sum-exp of vec along the given axis."""
    maxv = np.max(vec, axis=axis, keepdims=keepdims)
maxv[maxv == -float('inf')] = 0
return np.log(np.sum(np.exp(vec - maxv), axis=axis, keepdims=keepdims)) + maxv
class GMM(object):
""" Gaussian Mixture Model. """
def __init__(self, epsilon, init_sequential=False, eigreg=False, warmstart=True):
self.init_sequential = init_sequential
self.eigreg = eigreg
self.warmstart = warmstart
self.sigma = None
self.epsilon = epsilon
def inference(self, pts):
"""
Evaluate dynamics prior.
Args:
pts: A N x D array of points.
"""
# Compute posterior cluster weights.
logwts = self.clusterwts(pts)
# Compute posterior mean and covariance.
mu0, Phi = self.moments(logwts)
# Set hyperparameters.
m = self.N
n0 = m - 2 - mu0.shape[0]
# Normalize.
m = float(m) / self.N
n0 = float(n0) / self.N
return mu0, Phi, m, n0
def estep(self, data):
"""
Compute log observation probabilities under GMM.
Args:
data: A N x D array of points.
Returns:
logobs: A N x K array of log probabilities (for each point
on each cluster).
"""
# Constants.
N, D = data.shape
K = self.sigma.shape[0]
logobs = -0.5 * np.ones((N, K)) * D * np.log(2 * np.pi)
for i in range(K):
mu, sigma = self.mu[i], self.sigma[i]
L = scipy.linalg.cholesky(sigma, lower=True)
logobs[:, i] -= np.sum(np.log(np.diag(L)))
diff = (data - mu).T
soln = scipy.linalg.solve_triangular(L, diff, lower=True)
logobs[:, i] -= 0.5 * np.sum(soln ** 2, axis=0)
logobs += self.logmass.T
return logobs
def moments(self, logwts):
"""
Compute the moments of the cluster mixture with logwts.
Args:
logwts: A K x 1 array of log cluster probabilities.
Returns:
mu: A (D,) mean vector.
sigma: A D x D covariance matrix.
"""
# Exponentiate.
wts = np.exp(logwts)
# Compute overall mean.
mu = np.sum(self.mu * wts, axis=0)
# Compute overall covariance.
diff = self.mu - np.expand_dims(mu, axis=0)
diff_expand = np.expand_dims(self.mu, axis=1) * \
np.expand_dims(diff, axis=2)
wts_expand = np.expand_dims(wts, axis=2)
sigma = np.sum((self.sigma + diff_expand) * wts_expand, axis=0)
return mu, sigma
def clusterwts(self, data):
"""
Compute cluster weights for specified points under GMM.
Args:
data: An N x D array of points
Returns:
A K x 1 array of average cluster log probabilities.
"""
# Compute probability of each point under each cluster.
logobs = self.estep(data)
# Renormalize to get cluster weights.
logwts = logobs - logsum(logobs, axis=1)
# Average the cluster probabilities.
logwts = logsum(logwts, axis=0) - np.log(data.shape[0])
return logwts.T
def update(self, data, K, max_iterations=100):
"""
Run EM to update clusters.
Args:
data: An N x D data matrix, where N = number of data points.
K: Number of clusters to use.
"""
# Constants.
N = data.shape[0]
Do = data.shape[1]
LOGGER.debug('Fitting GMM with %d clusters on %d points', K, N)
if (not self.warmstart or self.sigma is None or
K != self.sigma.shape[0]):
# Initialization.
LOGGER.debug('Initializing GMM.')
self.sigma = np.zeros((K, Do, Do))
self.mu = np.zeros((K, Do))
self.logmass = np.log(1.0 / K) * np.ones((K, 1))
self.mass = (1.0 / K) * np.ones((K, 1))
self.N = data.shape[0]
N = self.N
# Set initial cluster indices.
if not self.init_sequential:
cidx = np.random.randint(0, K, size=(1, N))
else:
raise NotImplementedError()
# Initialize.
for i in range(K):
cluster_idx = (cidx == i)[0]
mu = np.mean(data[cluster_idx, :], axis=0) if data[cluster_idx, :].any() else self.mu[i]
diff = (data[cluster_idx, :] - mu).T
sigma = (1.0 / K) * (diff.dot(diff.T))
self.mu[i, :] = mu
self.sigma[i, :, :] = sigma + np.eye(Do) * 2e-6
prevll = -float('inf')
for itr in range(max_iterations):
# E-step: compute cluster probabilities.
logobs = self.estep(data)
# Compute log-likelihood.
ll = np.sum(logsum(logobs, axis=1))
LOGGER.debug('GMM itr %d/%d. Log likelihood: %f',
itr, max_iterations, ll)
if ll < prevll:
# TODO: Why does log-likelihood decrease sometimes?
LOGGER.debug('Log-likelihood decreased! Ending on itr=%d/%d',
itr, max_iterations)
break
if np.abs(ll - prevll) < 1e-5 * prevll:
LOGGER.debug('GMM converged on itr=%d/%d',
itr, max_iterations)
break
prevll = ll
# Renormalize to get cluster weights.
logw = logobs - logsum(logobs, axis=1)
assert logw.shape == (N, K)
# Renormalize again to get weights for refitting clusters.
logwn = logw - logsum(logw, axis=0)
assert logwn.shape == (N, K)
w = np.exp(logwn)
# M-step: update clusters.
# Fit cluster mass.
self.logmass = logsum(logw, axis=0).T
self.logmass = self.logmass - logsum(self.logmass, axis=0)
assert self.logmass.shape == (K, 1)
self.mass = np.exp(self.logmass)
# Reboot small clusters.
w[:, (self.mass < (1.0 / K) * 1e-4)[:, 0]] = 1.0 / N
# Fit cluster means.
w_expand = np.expand_dims(w, axis=2)
data_expand = np.expand_dims(data, axis=1)
self.mu = np.sum(w_expand * data_expand, axis=0)
# Fit covariances.
wdata = data_expand * np.sqrt(w_expand)
assert wdata.shape == (N, K, Do)
for i in range(K):
# Compute weighted outer product.
XX = wdata[:, i, :].T.dot(wdata[:, i, :])
mu = self.mu[i, :]
self.sigma[i, :, :] = XX - np.outer(mu, mu)
if self.eigreg: # Use eigenvalue regularization.
raise NotImplementedError()
else: # Use quick and dirty regularization.
sigma = self.sigma[i, :, :]
self.sigma[i, :, :] = 0.5 * (sigma + sigma.T) + \
1e-6 * np.eye(Do)
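# A minimal sketch (illustrative only): fit the GMM with EM on random data, then
# query the prior moments that inference() exposes for the dynamics prior.
def _gmm_usage_sketch(n=200, d=4, k=3):
    data = np.random.randn(n, d)
    gmm = GMM(epsilon=0.0)
    gmm.update(data, K=k, max_iterations=20)
    mu0, Phi, m, n0 = gmm.inference(data[:10])
    return mu0, Phi, m, n0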
| 7,345 | 33.650943 | 104 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/third_party/__init__.py | # Date: 3/30/19
# Author: Luke
# Project: baconian-internal
| 59 | 19 | 28 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/reward_func/__init__.py | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/reward_func/reward_func.py | from baconian.core.core import Basic
import abc
import numpy as np
class RewardFunc(Basic):
allow_duplicate_name = True
def __init__(self, name='reward_func'):
super().__init__(name=name)
@abc.abstractmethod
def __call__(self, state, action, new_state, **kwargs) -> float:
raise NotImplementedError
def init(self):
pass
class RandomRewardFunc(RewardFunc):
"""
Debug and test use only
"""
def __init__(self, name='random_reward_func'):
super().__init__(name)
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> float:
return np.random.random()
class CostFunc(RewardFunc):
pass
class RewardFuncCostWrapper(CostFunc):
def __init__(self, reward_func: RewardFunc, name='reward_func'):
super().__init__(name)
self._reward_func = reward_func
def __call__(self, state, action, new_state, **kwargs) -> float:
return self._reward_func.__call__(state=state,
action=action,
new_state=new_state) * -1.0
def init(self):
self._reward_func.init()
class QuadraticCostFunc(CostFunc):
"""
A quadratic function
"""
def __init__(self, C, c, name='reward_func'):
"""
        the cost is computed as 0.5 * [x_t, u_t]^T * C * [x_t, u_t] + [x_t, u_t]^T * c
:param C: quadratic term
:param c: linear term
:param name:
"""
super().__init__(name)
self.C = np.array(C)
self.c = np.array(c)
self.state_action_flat_dim = self.C.shape[0]
assert self.state_action_flat_dim == self.C.shape[1]
assert len(self.C.shape) == 2
assert self.c.shape[0] == self.state_action_flat_dim
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> float:
u_s = np.concatenate((np.array(state).reshape(-1), np.array(action).reshape(-1))).reshape(
self.state_action_flat_dim, 1)
res = 0.5 * np.dot(np.dot(u_s.T, self.C), u_s) + np.dot(u_s.T, self.c).reshape(())
return float(res)
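# A quick numeric check (illustrative): with C = I and c = 0 over a 1-D state
# and 1-D action, the cost reduces to 0.5 * (x^2 + u^2).
def _quadratic_cost_sketch():
    cost = QuadraticCostFunc(C=np.eye(2), c=np.zeros(2))
    cost.init()
    return cost(state=[1.0], action=[2.0])  # 0.5 * (1 + 4) = 2.5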
| 2,146 | 27.25 | 98 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/terminal_func/terminal_func.py | from baconian.core.core import Basic
import abc
import numpy as np
class TerminalFunc(Basic):
allow_duplicate_name = True
def __init__(self, name='terminal_func'):
super().__init__(name=name)
@abc.abstractmethod
def __call__(self, state, action, new_state, **kwargs) -> bool:
raise NotImplementedError
def init(self):
pass
class RandomTerminalFunc(TerminalFunc):
"""
Debug and test use only
"""
def __init__(self, name='random_terminal_func'):
super().__init__(name)
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> bool:
return np.random.random() > 0.5
class FixedEpisodeLengthTerminalFunc(Basic):
def __init__(self, max_step_length: int, step_count_fn, status=None,
                 name: str = 'fixed_episode_length_terminal_func'):
super().__init__(name, status)
self.max_step_length = max_step_length
self.step_count_fn = step_count_fn
def __call__(self, state=None, action=None, new_state=None, **kwargs) -> bool:
if self.step_count_fn() >= self.max_step_length:
return True
else:
return False
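# A minimal sketch (illustrative): terminate after 10 steps, with a simple
# counter closure standing in for the experiment's real step-count source.
def _fixed_length_terminal_sketch():
    counter = dict(t=0)

    def step_count():
        counter['t'] += 1
        return counter['t']

    term = FixedEpisodeLengthTerminalFunc(max_step_length=10, step_count_fn=step_count)
    return [term() for _ in range(12)]  # False for the first 9 calls, then True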
| 1,189 | 25.444444 | 82 | py |
baconian-project | baconian-project-master/baconian/algo/dynamics/terminal_func/__init__.py | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/algo/distribution/mvn.py | """
Module that computes diagonal multivariate normal distribution operations with TensorFlow tensors as parameters
"""
import tensorflow as tf
import numpy as np
def kl(mean_p, var_p, mean_q, var_q, dims):
"""
Compute the KL divergence of diagonal multivariate normal distribution q, and p, which is KL(P||Q)
:param mean_p:
:param var_p:
:param mean_q:
:param var_q:
:param dims:
:return:
"""
# p is old
log_var_p = tf.log(var_p)
log_var_q = tf.log(var_q)
log_det_cov_p = tf.reduce_sum(log_var_p)
log_det_cov_q = tf.reduce_sum(log_var_q)
tr_p_q = tf.reduce_sum(tf.exp(log_var_p - log_var_q))
    # the Mahalanobis term divides by the per-dimension variance of q, not its determinant
    kl = 0.5 * tf.reduce_mean(
        log_det_cov_q - log_det_cov_p + tr_p_q +
        tf.reduce_sum(tf.square(mean_q - mean_p) / var_q, axis=1) - dims)
return kl
def entropy(mean_p, var_p, dims):
return 0.5 * (dims * (np.log(2 * np.pi) + 1) + tf.reduce_sum(tf.log(var_p)))
def log_prob(variable_ph, mean_p, var_p):
log_prob = -0.5 * (tf.reduce_sum(tf.log(var_p)) + (np.log(2 * np.pi)))
log_prob += -0.5 * tf.reduce_sum(tf.square(variable_ph - mean_p) / var_p, axis=1)
return log_prob
def prob(variable_ph, mean_p, var_p):
    # the density is the exponential of the log density
    return tf.exp(log_prob(variable_ph, mean_p, var_p))
def sample(mean_p, var_p, dims):
return mean_p + tf.math.sqrt(var_p) * tf.random_normal(shape=(dims,))
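# A minimal sketch (illustrative): evaluate the ops above on concrete diagonal
# Gaussians inside a throwaway session.
def _mvn_usage_sketch(dims=3):
    mean_p, var_p = tf.zeros((1, dims)), tf.ones((1, dims))
    mean_q, var_q = tf.ones((1, dims)) * 0.5, tf.ones((1, dims)) * 2.0
    ops = [kl(mean_p, var_p, mean_q, var_q, dims),
           entropy(mean_p, var_p, dims),
           sample(mean_p, var_p, dims)]
    with tf.Session() as sess:
        return sess.run(ops)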
| 1,439 | 29.638298 | 114 | py |
baconian-project | baconian-project-master/baconian/algo/distribution/__init__.py | 0 | 0 | 0 | py |
baconian-project | baconian-project-master/baconian/algo/value_func/mlp_q_value.py | import typeguard as tg
from baconian.core.core import EnvSpec
import overrides
import tensorflow as tf
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.tf.mlp import MLP
from baconian.common.special import *
from baconian.core.util import init_func_arg_record_decorator
from baconian.algo.utils import _get_copy_arg_with_tf_reuse
from baconian.algo.misc.placeholder_input import PlaceholderInput
from baconian.algo.value_func import QValueFunction
class MLPQValueFunction(QValueFunction, PlaceholderInput):
"""
Multi Layer Q Value Function, based on Tensorflow, take the state and action as input,
return the Q value for all action/ input action.
"""
@init_func_arg_record_decorator()
@tg.typechecked
def __init__(self,
env_spec: EnvSpec,
name: str,
name_scope: str,
mlp_config: list,
state_input: tf.Tensor = None,
action_input: tf.Tensor = None,
reuse=False,
input_norm: np.ndarray = None,
output_norm: np.ndarray = None,
output_low: np.ndarray = None,
output_high: np.ndarray = None,
):
with tf.name_scope(name_scope):
state_input = state_input if state_input is not None else tf.placeholder(
shape=[None, env_spec.flat_obs_dim],
dtype=tf.float32,
name='state_ph')
action_input = action_input if action_input is not None else tf.placeholder(
shape=[None, env_spec.flat_action_dim],
dtype=tf.float32,
name='action_ph')
with tf.variable_scope(name_scope):
mlp_input_ph = tf.concat([state_input, action_input], axis=1, name='state_action_input')
mlp_net_kwargs = dict(
reuse=reuse,
mlp_config=mlp_config,
input_norm=input_norm,
output_norm=output_norm,
output_high=output_high,
output_low=output_low,
name_scope=name_scope,
)
mlp_net = MLP(input_ph=mlp_input_ph,
net_name=name_scope,
**mlp_net_kwargs)
parameters = ParametersWithTensorflowVariable(tf_var_list=mlp_net.var_list,
rest_parameters=dict(
**mlp_net_kwargs,
name=name
),
default_save_type='tf',
name='{}_tf_param'.format(name))
QValueFunction.__init__(self,
env_spec=env_spec,
name=name,
action_input=action_input,
state_input=state_input,
parameters=None)
PlaceholderInput.__init__(self, parameters=parameters)
self.mlp_config = mlp_config
self.input_norm = input_norm
self.output_norm = output_norm
self.output_low = output_low
self.output_high = output_high
self.name_scope = name_scope
self.mlp_input_ph = mlp_input_ph
self.mlp_net = mlp_net
self.q_tensor = self.mlp_net.output
def copy_from(self, obj: PlaceholderInput) -> bool:
return PlaceholderInput.copy_from(self, obj)
def forward(self, obs: (np.ndarray, list), action: (np.ndarray, list), sess=None,
feed_dict=None, *args,
**kwargs):
sess = sess if sess else tf.get_default_session()
obs = make_batch(obs, original_shape=self.env_spec.obs_shape)
action = make_batch(action, original_shape=[self.env_spec.flat_action_dim])
feed_dict = {
self.state_input: obs,
self.action_input: action,
**self.parameters.return_tf_parameter_feed_dict()
} if feed_dict is None else {
**feed_dict,
**self.parameters.return_tf_parameter_feed_dict()
}
q = sess.run(self.q_tensor,
feed_dict=feed_dict)
return q
def init(self, source_obj=None):
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
def make_copy(self, *args, **kwargs):
kwargs = _get_copy_arg_with_tf_reuse(obj=self, kwargs=kwargs)
copy_mlp_q_value = MLPQValueFunction(env_spec=self.env_spec,
input_norm=self.input_norm,
output_norm=self.output_norm,
output_low=self.output_low,
output_high=self.output_high,
mlp_config=self.mlp_config,
**kwargs)
return copy_mlp_q_value
def save(self, *args, **kwargs):
return PlaceholderInput.save(self, *args, **kwargs)
def load(self, *args, **kwargs):
return PlaceholderInput.load(self, *args, **kwargs)
| 5,335 | 41.349206 | 100 | py |
baconian-project | baconian-project-master/baconian/algo/value_func/__init__.py | from .value_func import ValueFunction, QValueFunction, VValueFunction
from .mlp_q_value import MLPQValueFunction
from .mlp_v_value import MLPVValueFunc
| 152 | 37.25 | 69 | py |
baconian-project | baconian-project-master/baconian/algo/value_func/value_func.py | from baconian.core.core import Basic, EnvSpec
import typeguard as tg
from baconian.core.parameters import Parameters
import abc
class ValueFunction(Basic):
@tg.typechecked
def __init__(self, env_spec: EnvSpec, parameters: Parameters = None, name='value_func'):
super().__init__(name)
self.env_spec = env_spec
self.parameters = parameters
@property
def obs_space(self):
return self.env_spec.obs_space
@property
def action_space(self):
return self.env_spec.action_space
@tg.typechecked
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
return True
@abc.abstractmethod
def forward(self, *args, **kwargs):
raise NotImplementedError
@tg.typechecked
def update(self, param_update_args: dict, *args, **kwargs):
self.parameters.update(**param_update_args)
@abc.abstractmethod
def init(self, source_obj=None):
raise NotImplementedError
@abc.abstractmethod
def make_copy(self, *args, **kwargs):
raise NotImplementedError
class VValueFunction(ValueFunction):
def __init__(self, state_input, env_spec: EnvSpec, parameters: Parameters = None, name='value_func'):
self.state_input = state_input
super().__init__(env_spec, parameters, name)
class QValueFunction(ValueFunction):
def __init__(self, state_input, action_input, env_spec: EnvSpec, parameters: Parameters = None, name='value_func'):
self.state_input = state_input
self.action_input = action_input
super().__init__(env_spec, parameters, name)
| 1,733 | 29.421053 | 119 | py |
baconian-project | baconian-project-master/baconian/algo/value_func/mlp_v_value.py | import typeguard as tg
from baconian.core.core import EnvSpec
import overrides
import tensorflow as tf
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.tf.mlp import MLP
from baconian.common.special import *
from baconian.algo.utils import _get_copy_arg_with_tf_reuse
from baconian.algo.misc.placeholder_input import PlaceholderInput
from baconian.algo.value_func import VValueFunction
class MLPVValueFunc(VValueFunction, PlaceholderInput):
"""
Multi Layer Q Value Function, based on Tensorflow, take the state and action as input,
return the Q value for all action/ input action.
"""
@tg.typechecked
def __init__(self,
env_spec: EnvSpec,
name_scope: str,
name: str,
mlp_config: list,
state_input: tf.Tensor = None,
reuse=False,
input_norm: np.ndarray = None,
output_norm: np.ndarray = None,
output_low: np.ndarray = None,
output_high: np.ndarray = None,
):
with tf.variable_scope(name_scope):
state_input = state_input if state_input is not None else tf.placeholder(
shape=[None, env_spec.flat_obs_dim],
dtype=tf.float32,
name='state_ph')
mlp_input_ph = state_input
mlp_kwargs = dict(
reuse=reuse,
mlp_config=mlp_config,
input_norm=input_norm,
output_norm=output_norm,
output_high=output_high,
output_low=output_low,
name_scope=name_scope
)
mlp_net = MLP(input_ph=mlp_input_ph,
net_name='mlp',
**mlp_kwargs)
parameters = ParametersWithTensorflowVariable(tf_var_list=mlp_net.var_list,
rest_parameters=mlp_kwargs,
name='mlp_v_value_function_tf_param')
VValueFunction.__init__(self,
env_spec=env_spec,
state_input=state_input,
name=name,
parameters=None)
PlaceholderInput.__init__(self, parameters=parameters)
self.name_scope = name_scope
self.mlp_config = mlp_config
self.input_norm = input_norm
self.output_norm = output_norm
self.output_low = output_low
self.output_high = output_high
self.state_input = state_input
self.mlp_input_ph = mlp_input_ph
self.mlp_net = mlp_net
self.v_tensor = self.mlp_net.output
def copy_from(self, obj: PlaceholderInput) -> bool:
return PlaceholderInput.copy_from(self, obj)
def forward(self, obs: (np.ndarray, list), sess=None,
feed_dict=None, *args,
**kwargs):
obs = make_batch(obs, original_shape=self.env_spec.obs_shape)
feed_dict = feed_dict if feed_dict is not None else dict()
sess = sess if sess else tf.get_default_session()
feed_dict = {
self.state_input: obs,
**feed_dict,
**self.parameters.return_tf_parameter_feed_dict()
}
v = sess.run(self.v_tensor,
feed_dict=feed_dict)
return v
def init(self, source_obj=None):
self.parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
def make_copy(self, *args, **kwargs):
kwargs = _get_copy_arg_with_tf_reuse(obj=self, kwargs=kwargs)
copy_mlp_v_value = MLPVValueFunc(env_spec=self.env_spec,
input_norm=self.input_norm,
output_norm=self.output_norm,
output_low=self.output_low,
output_high=self.output_high,
mlp_config=self.mlp_config,
**kwargs)
return copy_mlp_v_value
def save(self, *args, **kwargs):
return PlaceholderInput.save(self, *args, **kwargs)
def load(self, *args, **kwargs):
return PlaceholderInput.load(self, *args, **kwargs)
| 4,350 | 37.848214 | 91 | py |
baconian-project | baconian-project-master/baconian/algo/misc/sample_processor.py | from baconian.common.sampler.sample_data import TransitionData, TrajectoryData
from baconian.algo.value_func import ValueFunction
import scipy.signal
from baconian.common.special import *
def discount(x, gamma):
    """Calculate the discounted forward sum of a sequence at each point (code adapted from pat-coady's PPO implementation)."""
    return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
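# A quick numeric check (illustrative): with gamma = 0.5 the discounted forward
# sums of [1, 1, 1] are [1.75, 1.5, 1.0].
def _discount_sketch():
    return discount(np.array([1.0, 1.0, 1.0]), gamma=0.5)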
class SampleProcessor(object):
@staticmethod
def add_gae(data: TrajectoryData, gamma, lam, value_func: ValueFunction = None, name='advantage_set'):
for traj in data.trajectories:
# scale if gamma less than 1
rewards = traj('reward_set') * (1 - gamma) if gamma < 0.999 else traj('reward_set')
try:
traj('v_value_set')
except ValueError:
if value_func is None:
raise ValueError('v_value_set did not existed, pass in value_func parameter to compute v_value_set')
SampleProcessor.add_estimated_v_value(
data=traj,
value_func=value_func
)
finally:
values = traj('v_value_set')
# todo better way to handle shape error (no squeeze)
tds = np.squeeze(rewards) - np.squeeze(values) + np.append(values[1:] * gamma, 0)
advantages = discount(tds, gamma * lam)
traj.append_new_set(name=name, data_set=make_batch(advantages, original_shape=[]), shape=[])
@staticmethod
def add_discount_sum_reward(data: TrajectoryData, gamma, name='discount_set'):
for traj in data.trajectories:
# scale if gamma less than 1
dis_set = traj('reward_set') * (1 - gamma) if gamma < 0.999 else traj('reward_set')
# TODO add a unit test
dis_reward_set = discount(np.reshape(dis_set, [-1, ]), gamma)
traj.append_new_set(name=name, data_set=make_batch(dis_reward_set, original_shape=[]), shape=[])
@staticmethod
def add_estimated_v_value(data: (TrajectoryData, TransitionData), value_func: ValueFunction, name='v_value_set'):
if isinstance(data, TrajectoryData):
for path in data.trajectories:
SampleProcessor._add_estimated_v_value(path, value_func, name)
else:
SampleProcessor._add_estimated_v_value(data, value_func, name)
@staticmethod
def _add_estimated_v_value(data: TransitionData, value_func: ValueFunction, name):
v_set = value_func.forward(data.state_set)
data.append_new_set(name=name, data_set=make_batch(np.array(v_set), original_shape=[]), shape=[])
@staticmethod
def normalization(data: (TransitionData, TrajectoryData), key, mean: np.ndarray = None, std_dev: np.ndarray = None):
if isinstance(data, TransitionData):
if mean is not None:
assert mean.shape == data(key).shape[1:]
assert std_dev.shape == data(key).shape[1:]
else:
mean = data(key).mean(axis=0)
std_dev = data(key).std(axis=0)
data.append_new_set(name=key, data_set=(data(key) - mean) / (std_dev + 1e-6), shape=data(key).shape[1:])
return data
elif isinstance(data, TrajectoryData):
# TODO add shape check
mean = np.mean(np.concatenate([d(key) for d in data.trajectories], axis=0), axis=0)
std_dev = np.std(np.concatenate([d(key) for d in data.trajectories], axis=0), axis=0)
for d in data.trajectories:
d.append_new_set(name=key, data_set=np.array((d(key) - mean) / (std_dev + 1e-6)),
shape=d(key).shape[1:])
return data
else:
raise TypeError('not supported sample data type')
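# A minimal sketch (illustrative) of the GAE computation in add_gae above,
# written out for a single trajectory without the class plumbing.
def _gae_sketch(rewards, values, gamma=0.99, lam=0.95):
    values = np.squeeze(values)
    tds = np.squeeze(rewards) - values + np.append(values[1:] * gamma, 0)
    return discount(tds, gamma * lam)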
| 3,808 | 47.21519 | 120 | py |
baconian-project | baconian-project-master/baconian/algo/misc/epsilon_greedy.py | from baconian.common.spaces.base import Space
import numpy as np
from typeguard import typechecked
from baconian.core.parameters import Parameters
from baconian.common.schedules import Scheduler
class ExplorationStrategy(object):
def __init__(self):
self.parameters = None
def predict(self, **kwargs):
raise NotImplementedError
class EpsilonGreedy(ExplorationStrategy):
@typechecked
def __init__(self, action_space: Space, init_random_prob: float, prob_scheduler: Scheduler = None):
        super(EpsilonGreedy, self).__init__()
self.action_space = action_space
self.random_prob_func = lambda: init_random_prob
if prob_scheduler:
self.random_prob_func = prob_scheduler.value
self.parameters = Parameters(parameters=dict(random_prob_func=self.random_prob_func),
name='eps_greedy_params')
def predict(self, **kwargs):
if np.random.random() < self.parameters('random_prob_func')():
return self.action_space.sample()
else:
algo = kwargs.pop('algo')
return algo.predict(**kwargs)
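# A minimal sketch (illustrative): `_GreedyStub` stands in for any algorithm
# object exposing predict(); with probability `random_prob_func()` a random
# action from the space is returned instead of the greedy one.
def _eps_greedy_usage_sketch(action_space, obs):
    class _GreedyStub(object):
        def predict(self, obs):
            return action_space.sample()

    eps = EpsilonGreedy(action_space=action_space, init_random_prob=0.1)
    return eps.predict(algo=_GreedyStub(), obs=obs)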
| 1,158 | 32.114286 | 103 | py |
baconian-project | baconian-project-master/baconian/algo/misc/__init__.py | from .replay_buffer import BaseReplayBuffer, UniformRandomReplayBuffer
from .epsilon_greedy import ExplorationStrategy, EpsilonGreedy
from .placeholder_input import PlaceholderInput, MultiPlaceholderInput
from .sample_processor import SampleProcessor
| 251 | 49.4 | 70 | py |
baconian-project | baconian-project-master/baconian/algo/misc/replay_buffer.py | import numpy as np
from typeguard import typechecked
from baconian.common.sampler.sample_data import TransitionData, TrajectoryData, SampleData
from baconian.common.error import *
class RingBuffer(object):
@typechecked
def __init__(self, maxlen: int, shape: (list, tuple), dtype='float32'):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = np.zeros((maxlen,) + shape).astype(dtype)
def __len__(self):
return self.length
def __getitem__(self, idx):
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
def get_batch(self, idxs):
return self.data[(self.start + idxs) % self.maxlen]
def append(self, v):
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
def append_batch(self, v):
v = np.array(v)
if len(v.shape) == 1 or len(v.shape) == 0:
v = np.reshape(v, [-1, 1])
assert len(v.shape) == 2
for i in range(len(v)):
self.append(v[i])
def array_min2d(x):
x = np.array(x)
if x.ndim >= 2:
return x
return x.reshape(-1, 1)
class BaseReplayBuffer(object):
def __init__(self, limit, action_shape, observation_shape):
self.limit = limit
self.action_shape = action_shape
self.obs_shape = observation_shape
self.observations0 = RingBuffer(limit, shape=observation_shape)
self.actions = RingBuffer(limit, shape=action_shape)
self.rewards = RingBuffer(limit, shape=(1,))
self.terminals1 = RingBuffer(limit, shape=(1,))
self.observations1 = RingBuffer(limit, shape=observation_shape)
def sample(self, batch_size):
raise NotImplementedError
def append(self, obs0, obs1, action, reward, terminal1, training=True):
if not training:
return
self.observations0.append(obs0)
self.actions.append(action)
self.rewards.append(reward)
self.observations1.append(obs1)
self.terminals1.append(terminal1)
def append_batch(self, obs0, obs1, action, reward, terminal1, training=True):
if not training:
return
self.observations0.append_batch(obs0)
self.actions.append_batch(action)
self.rewards.append_batch(reward)
self.observations1.append_batch(obs1)
self.terminals1.append_batch(terminal1)
@property
def nb_entries(self):
return len(self.observations0)
def reset(self):
self.observations0 = RingBuffer(self.limit, shape=self.obs_shape)
self.actions = RingBuffer(self.limit, shape=self.action_shape)
self.rewards = RingBuffer(self.limit, shape=(1,))
self.terminals1 = RingBuffer(self.limit, shape=(1,))
self.observations1 = RingBuffer(self.limit, shape=self.obs_shape)
class UniformRandomReplayBuffer(BaseReplayBuffer):
def __init__(self, limit, action_shape, observation_shape):
super().__init__(limit, action_shape, observation_shape)
def sample(self, batch_size) -> SampleData:
if self.nb_entries < batch_size:
raise MemoryBufferLessThanBatchSizeError()
batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
obs1_batch = self.observations1.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
reward_batch = self.rewards.get_batch(batch_idxs)
terminal1_batch = self.terminals1.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'obs1': array_min2d(obs1_batch),
'rewards': array_min2d(reward_batch),
'actions': array_min2d(action_batch),
'terminals1': array_min2d(terminal1_batch),
}
res = TransitionData(obs_shape=self.obs_shape, action_shape=self.action_shape)
for obs0, obs1, action, terminal, re in zip(result['obs0'], result['obs1'], result['actions'],
result['terminals1'], result['rewards']):
res.append(state=obs0, new_state=obs1, action=action, done=terminal, reward=re)
return res
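# A minimal sketch (illustrative): fill the uniform buffer with random
# transitions, then draw a batch back as TransitionData.
def _replay_buffer_usage_sketch(obs_dim=4, action_dim=2, batch_size=8):
    buffer = UniformRandomReplayBuffer(limit=100,
                                       action_shape=(action_dim,),
                                       observation_shape=(obs_dim,))
    for _ in range(32):
        buffer.append(obs0=np.random.randn(obs_dim),
                      obs1=np.random.randn(obs_dim),
                      action=np.random.randn(action_dim),
                      reward=float(np.random.randn()),
                      terminal1=False)
    return buffer.sample(batch_size=batch_size)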
class PrioritisedReplayBuffer(BaseReplayBuffer):
    def __init__(self, limit, action_shape, observation_shape, alpha, beta, beta_increment):
        super().__init__(limit, action_shape, observation_shape)
        it_capacity = 1
        while it_capacity < limit:
            it_capacity *= 2
        assert alpha >= 0
        self.alpha = alpha
        self.beta = beta
        self.beta_increment = beta_increment
        self.max_priority = 1.0
        # plain array-backed priority storage so that update_priorities() below is usable;
        # a sum/min segment tree over it_capacity slots would be the efficient choice
        self.it_sum = np.zeros(it_capacity)
        self.it_min = np.full(it_capacity, float('inf'))
def update_priorities(self, idxes, priorities):
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self.observations0)
self.it_sum[idx] = priority ** self.alpha
self.it_min[idx] = priority ** self.alpha
self.max_priority = max(self.max_priority, priority)
def sample(self, batch_size) -> SampleData:
if self.nb_entries < batch_size:
raise MemoryBufferLessThanBatchSizeError()
# todo This will be changed to prioritised
batch_idxs = np.random.randint(self.nb_entries - 2, size=batch_size)
obs0_batch = self.observations0.get_batch(batch_idxs)
obs1_batch = self.observations1.get_batch(batch_idxs)
action_batch = self.actions.get_batch(batch_idxs)
reward_batch = self.rewards.get_batch(batch_idxs)
terminal1_batch = self.terminals1.get_batch(batch_idxs)
result = {
'obs0': array_min2d(obs0_batch),
'obs1': array_min2d(obs1_batch),
'rewards': array_min2d(reward_batch),
'actions': array_min2d(action_batch),
'terminals1': array_min2d(terminal1_batch),
}
res = TransitionData(obs_shape=self.obs_shape, action_shape=self.action_shape)
for obs0, obs1, action, terminal, re in zip(result['obs0'], result['obs1'], result['actions'],
result['terminals1'], result['rewards']):
res.append(state=obs0, new_state=obs1, action=action, done=terminal, reward=re)
return res
| 6,768 | 35.989071 | 102 | py |
baconian-project | baconian-project-master/baconian/algo/misc/placeholder_input.py | import tensorflow as tf
import typeguard as tg
import os
from baconian.common.logging import ConsoleLogger
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
from baconian.core.core import Basic
from baconian.config.global_config import GlobalConfig
class PlaceholderInput(object):
@tg.typechecked
def __init__(self, parameters: ParametersWithTensorflowVariable = None,
name_scope=None):
self.parameters = parameters
if name_scope:
self.name_scope = name_scope
def save(self, global_step, save_path=None, name=None, **kwargs):
save_path = save_path if save_path else GlobalConfig().DEFAULT_MODEL_CHECKPOINT_PATH
name = name if name else self.name
sess = kwargs['sess'] if 'sess' in kwargs else None
self.parameters.save(save_path=save_path,
global_step=global_step,
sess=sess,
name=name)
ConsoleLogger().print('info',
'model: {}, global step: {}, saved at {}-{}'.format(name, global_step, save_path,
global_step))
def load(self, path_to_model, model_name, global_step=None, **kwargs):
sess = kwargs['sess'] if 'sess' in kwargs else None
self.parameters.load(path_to_model=path_to_model,
model_name=model_name,
global_step=global_step,
sess=sess)
ConsoleLogger().print('info', 'model: {} loaded from {}'.format(model_name, path_to_model))
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
self.parameters.copy_from(source_parameter=obj.parameters)
return True
class MultiPlaceholderInput(object):
@tg.typechecked
def __init__(self, sub_placeholder_input_list: list,
parameters: ParametersWithTensorflowVariable):
self._placeholder_input_list = sub_placeholder_input_list
for param in self._placeholder_input_list:
assert isinstance(param, dict)
assert 'attr_name' in param
assert 'obj' in param and isinstance(param['obj'], PlaceholderInput) and isinstance(param['obj'], Basic)
self._own_placeholder_input_obj = PlaceholderInput(parameters=parameters)
def save(self, global_step, save_path, name, **kwargs):
sess = kwargs['sess'] if 'sess' in kwargs else None
self._own_placeholder_input_obj.parameters.save(save_path=save_path,
global_step=global_step,
sess=sess,
name=name)
for param in self._placeholder_input_list:
param['obj'].save(save_path=os.path.join(save_path, param['attr_name']),
global_step=global_step,
sess=sess,
name=param['obj'].name)
ConsoleLogger().print('info',
'model: {}, global step: {}, saved at {}-{}'.format(name, global_step, save_path,
global_step))
def load(self, path_to_model, model_name, global_step=None, **kwargs):
sess = kwargs['sess'] if 'sess' in kwargs else None
self._own_placeholder_input_obj.parameters.load(
path_to_model=path_to_model,
model_name=model_name,
global_step=global_step,
sess=sess
)
for param in self._placeholder_input_list:
param['obj'].load(path_to_model=os.path.join(path_to_model, param['attr_name']),
global_step=global_step,
model_name=param['obj'].name,
sess=sess)
ConsoleLogger().print('info', 'model: {} loaded from {}'.format(model_name, path_to_model))
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
self._own_placeholder_input_obj.copy_from(obj._own_placeholder_input_obj)
for self_param, src_param in zip(self._placeholder_input_list, obj._placeholder_input_list):
self_param['obj'].copy_from(src_param['obj'])
        ConsoleLogger().print('info', 'model: {} copied from {}'.format(self, obj))
return True
| 4,758 | 48.061856 | 116 | py |
baconian-project | baconian-project-master/baconian/tf/tensor_utils.py | from collections.abc import Iterable
from collections import namedtuple
import numpy as np
import tensorflow as tf
def compile_function(inputs, outputs, log_name=None):
def run(*input_vals):
sess = tf.get_default_session()
return sess.run(outputs, feed_dict=dict(list(zip(inputs, input_vals))))
return run
def flatten_batch(t, name="flatten_batch"):
return tf.reshape(t, [-1] + list(t.shape[2:]), name=name)
def flatten_batch_dict(d, name=None):
with tf.name_scope(name, "flatten_batch_dict", [d]):
return {k: flatten_batch(v) for k, v in d.items()}
def filter_valids(t, valid, name="filter_valids"):
# 'valid' is either 0 or 1 with dtype of tf.float32
# Must round before cast to prevent floating-error
return tf.dynamic_partition(
t, tf.to_int32(tf.round(valid)), 2, name=name)[1]
def filter_valids_dict(d, valid, name=None):
with tf.name_scope(name, "filter_valids_dict", [d, valid]):
return {k: filter_valids(v, valid) for k, v in d.items()}
def graph_inputs(name, **kwargs):
Singleton = namedtuple(name, kwargs.keys())
return Singleton(**kwargs)
def flatten_inputs(deep):
def flatten(deep):
for d in deep:
if isinstance(d, Iterable) and not isinstance(
d, (str, bytes, tf.Tensor, np.ndarray)):
yield from flatten(d)
else:
yield d
return list(flatten(deep))
def flatten_tensor_variables(ts):
return tf.concat(
axis=0,
values=[tf.reshape(x, [-1]) for x in ts],
name="flatten_tensor_variables")
def unflatten_tensor_variables(flatarr, shapes, symb_arrs):
arrs = []
n = 0
for (shape, symb_arr) in zip(shapes, symb_arrs):
size = np.prod(list(shape))
arr = tf.reshape(flatarr[n:n + size], shape)
arrs.append(arr)
n += size
return arrs
def new_tensor(name, ndim, dtype):
return tf.placeholder(dtype=dtype, shape=[None] * ndim, name=name)
def new_tensor_like(name, arr_like):
return new_tensor(name,
arr_like.get_shape().ndims, arr_like.dtype.base_dtype)
def concat_tensor_list(tensor_list):
return np.concatenate(tensor_list, axis=0)
def concat_tensor_dict_list(tensor_dict_list):
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = concat_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = concat_tensor_list([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
def stack_tensor_list(tensor_list):
return np.array(tensor_list)
# tensor_shape = np.array(tensor_list[0]).shape
# if tensor_shape is tuple():
# return np.array(tensor_list)
# return np.vstack(tensor_list)
def stack_tensor_dict_list(tensor_dict_list):
"""
Stack a list of dictionaries of {tensors or dictionary of tensors}.
:param tensor_dict_list: a list of dictionaries of {tensors or dictionary
of tensors}.
:return: a dictionary of {stacked tensors or dictionary of stacked tensors}
"""
keys = list(tensor_dict_list[0].keys())
ret = dict()
for k in keys:
example = tensor_dict_list[0][k]
if isinstance(example, dict):
v = stack_tensor_dict_list([x[k] for x in tensor_dict_list])
else:
v = stack_tensor_list([x[k] for x in tensor_dict_list])
ret[k] = v
return ret
def split_tensor_dict_list(tensor_dict):
keys = list(tensor_dict.keys())
ret = None
for k in keys:
vals = tensor_dict[k]
if isinstance(vals, dict):
vals = split_tensor_dict_list(vals)
if ret is None:
ret = [{k: v} for v in vals]
else:
for v, cur_dict in zip(vals, ret):
cur_dict[k] = v
return ret
def to_onehot_sym(inds, dim):
return tf.one_hot(inds, depth=dim, on_value=1, off_value=0)
def pad_tensor(x, max_len):
return np.concatenate([
x,
np.tile(
np.zeros_like(x[0]), (max_len - len(x),) + (1,) * np.ndim(x[0]))
])
def pad_tensor_n(xs, max_len):
ret = np.zeros((len(xs), max_len) + xs[0].shape[1:], dtype=xs[0].dtype)
for idx, x in enumerate(xs):
ret[idx][:len(x)] = x
return ret
def pad_tensor_dict(tensor_dict, max_len):
keys = list(tensor_dict.keys())
ret = dict()
for k in keys:
if isinstance(tensor_dict[k], dict):
ret[k] = pad_tensor_dict(tensor_dict[k], max_len)
else:
ret[k] = pad_tensor(tensor_dict[k], max_len)
return ret
def compute_advantages(discount,
gae_lambda,
max_len,
baselines,
rewards,
name=None):
with tf.name_scope(name, "compute_advantages",
[discount, gae_lambda, max_len, baselines, rewards]):
# Calculate advantages
#
# Advantages are a discounted cumulative sum.
#
# The discount cumulative sum can be represented as an IIR
        # filter on the reversed input vectors, i.e.
# y[t] - discount*y[t+1] = x[t]
# or
# rev(y)[t] - discount*rev(y)[t-1] = rev(x)[t]
#
# Given the time-domain IIR filter step response, we can
# calculate the filter response to our signal by convolving the
# signal with the filter response function. The time-domain IIR
# step response is calculated below as discount_filter:
# discount_filter =
# [1, discount, discount^2, ..., discount^N-1]
        #   where the episode length is N.
#
# We convolve discount_filter with the reversed time-domain
# signal deltas to calculate the reversed advantages:
# rev(advantages) = discount_filter (X) rev(deltas)
#
# TensorFlow's tf.nn.conv1d op is not a true convolution, but
# actually a cross-correlation, so its input and output are
# already implicitly reversed for us.
# advantages = discount_filter (tf.nn.conv1d) deltas
# Prepare convolutional IIR filter to calculate advantages
gamma_lambda = tf.constant(
float(discount) * float(gae_lambda),
dtype=tf.float32,
shape=[max_len, 1, 1])
advantage_filter = tf.cumprod(gamma_lambda, exclusive=True)
# Calculate deltas
pad = tf.zeros_like(baselines[:, :1])
baseline_shift = tf.concat([baselines[:, 1:], pad], 1)
deltas = rewards + discount * baseline_shift - baselines
# Convolve deltas with the discount filter to get advantages
deltas_pad = tf.expand_dims(
tf.concat([deltas, tf.zeros_like(deltas[:, :-1])], axis=1), axis=2)
adv = tf.nn.conv1d(
deltas_pad, advantage_filter, stride=1, padding='VALID')
advantages = tf.reshape(adv, [-1])
return advantages
def discounted_returns(discount, max_len, rewards, name=None):
with tf.name_scope(name, "discounted_returns",
[discount, max_len, rewards]):
gamma = tf.constant(
float(discount), dtype=tf.float32, shape=[max_len, 1, 1])
return_filter = tf.cumprod(gamma, exclusive=True)
rewards_pad = tf.expand_dims(
tf.concat([rewards, tf.zeros_like(rewards[:, :-1])], axis=1),
axis=2)
returns = tf.nn.conv1d(
rewards_pad, return_filter, stride=1, padding='VALID')
return returns
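# A quick numeric check (illustrative): with discount 0.5 and rewards [1, 1, 1]
# the per-step returns are [1.75, 1.5, 1.0] (output shape (1, 3, 1)).
def _discounted_returns_sketch():
    rewards = tf.constant([[1.0, 1.0, 1.0]])
    returns_op = discounted_returns(discount=0.5, max_len=3, rewards=rewards)
    with tf.Session() as sess:
        return sess.run(returns_op)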
| 7,669 | 31.362869 | 79 | py |
baconian-project | baconian-project-master/baconian/tf/tf_parameters.py | import tensorflow as tf
from baconian.core.parameters import Parameters
from baconian.config.global_config import GlobalConfig
from overrides.overrides import overrides
from typeguard import typechecked
import os
from baconian.common.schedules import Scheduler
import numpy as np
class ParametersWithTensorflowVariable(Parameters):
@typechecked
def __init__(self, tf_var_list: list, rest_parameters: dict, name: str,
max_to_keep=GlobalConfig().DEFAULT_MAX_TF_SAVER_KEEP,
default_save_type='tf',
source_config=None,
to_scheduler_param_tuple: list = None,
save_rest_param_flag=True,
to_ph_parameter_dict: dict = None,
require_snapshot=False):
super(ParametersWithTensorflowVariable, self).__init__(parameters=rest_parameters,
name=name,
to_scheduler_param_tuple=to_scheduler_param_tuple,
source_config=source_config)
self._tf_var_list = tf_var_list
self.snapshot_var = []
self.save_snapshot_op = []
self.load_snapshot_op = []
self.saver = None
self.max_to_keep = max_to_keep
self.require_snapshot = require_snapshot
self.default_checkpoint_type = default_save_type
self.save_rest_param_flag = save_rest_param_flag
if default_save_type != 'tf':
raise NotImplementedError('only support saving tf')
self._registered_tf_ph_dict = dict()
if to_ph_parameter_dict:
for key, val in to_ph_parameter_dict.items():
self.to_tf_ph(key=key, ph=val)
def init(self):
Parameters.init(self)
sess = tf.get_default_session()
sess.run(tf.variables_initializer(var_list=self._tf_var_list))
if self.require_snapshot is True:
if len(self.snapshot_var) == 0:
# add the snapshot op after the init
sess = tf.get_default_session()
with tf.variable_scope('snapshot'):
for var in self._tf_var_list:
snap_var = tf.Variable(initial_value=sess.run(var),
expected_shape=var.get_shape().as_list(),
name=str(var.name).split(':')[0])
self.snapshot_var.append(snap_var)
self.save_snapshot_op.append(tf.assign(snap_var, var))
self.load_snapshot_op.append(tf.assign(var, snap_var))
sess.run(tf.variables_initializer(var_list=self.snapshot_var))
sess.run(self.save_snapshot_op)
self.saver = tf.train.Saver(max_to_keep=self.max_to_keep,
var_list=self._tf_var_list)
def return_tf_parameter_feed_dict(self) -> dict:
res = dict()
for key, val in self._registered_tf_ph_dict.items():
res[val] = self(key, require_true_value=True)
return res
def save_snapshot(self):
sess = tf.get_default_session()
if len(self.save_snapshot_op) == 0:
with tf.variable_scope('snapshot'):
for var in self._tf_var_list:
snap_var = tf.Variable(initial_value=sess.run(var),
expected_shape=var.get_shape().as_list(),
name=var.name)
self.snapshot_var.append(snap_var)
self.save_snapshot_op.append(tf.assign(var, snap_var))
sess.run(self.save_snapshot_op)
def load_snapshot(self):
sess = tf.get_default_session()
if len(self.load_snapshot_op) == 0:
with tf.variable_scope('snapshot'):
for var in self._tf_var_list:
snap_var = tf.Variable(initial_value=sess.run(var),
expected_shape=var.get_shape().as_list(),
name=var.name)
self.snapshot_var.append(snap_var)
self.load_snapshot_op.append(tf.assign(snap_var, var))
sess.run(self.load_snapshot_op)
def save(self, save_path, global_step, sess=None, name=None, *args, **kwargs):
if self.default_checkpoint_type == 'tf':
self._save_to_tf(save_path=save_path,
global_step=global_step,
sess=sess,
name=name)
elif self.default_checkpoint_type == 'h5py':
raise NotImplementedError
if self.save_rest_param_flag is False:
to_save_dict = dict(_source_config=self._source_config.config_dict)
else:
to_save_dict = dict(_parameters=self._parameters, _source_config=self._source_config.config_dict)
Parameters.save(self,
save_path=save_path,
global_step=global_step,
default_save_param=to_save_dict,
name=name)
def load(self, path_to_model, global_step=None, sess=None, model_name=None, *args, **kwargs):
if not model_name:
model_name = self.name
if self.default_checkpoint_type == 'tf':
self._load_from_tf(path_to_model=path_to_model,
global_step=global_step,
sess=sess, model_name=model_name)
elif self.default_checkpoint_type == 'h5py':
self._load_from_h5py(*args, **kwargs)
Parameters.load(self,
load_path=path_to_model,
global_step=global_step,
name=model_name)
def _save_to_tf(self, save_path, global_step, sess=None, name=None):
name = name if name else self.name
sess = sess if sess else tf.get_default_session()
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path, name)
self.saver.save(sess=sess,
save_path=save_path,
global_step=global_step)
def _load_from_tf(self, path_to_model, model_name, global_step=None, sess=None):
sess = sess if sess else tf.get_default_session()
if not global_step:
loaded_path = tf.train.latest_checkpoint(path_to_model)
else:
loaded_path = os.path.join(os.path.join(path_to_model, '{}-{}'.format(model_name, global_step)))
self.saver.restore(sess=sess,
save_path=loaded_path)
def _save_to_h5py(self, var_list, sess):
raise NotImplementedError
def _load_from_h5py(self, path_to_h5py):
raise NotImplementedError
def __call__(self, key=None, require_true_value=False):
if key in self._registered_tf_ph_dict.keys():
if require_true_value is True:
return super().__call__(key)
else:
return self._registered_tf_ph_dict[key]
else:
if key == 'tf_var_list':
return self._tf_var_list
else:
return super().__call__(key)
def set(self, key, new_val):
if not isinstance(new_val, type(self(key, require_true_value=True))):
raise TypeError('new value of parameters {} should be type {} instead of {}'.format(key, type(self(key)),
type(new_val)))
else:
if key == 'tf_var_list':
self.set_tf_var_list(new_val)
elif key in self._parameters:
self._parameters[key] = new_val
else:
self._source_config.set(key, new_val)
def set_tf_var_list(self, tf_var_list: list):
temp_var_list = list(set(tf_var_list))
if len(temp_var_list) < len(tf_var_list):
raise ValueError('Redundant tf variable in tf_var_list')
for var in tf_var_list:
assert isinstance(var, (tf.Tensor, tf.Variable))
self._tf_var_list += tf_var_list
def to_tf_ph(self, key, ph: tf.Tensor):
# call the parameters first to make sure it have an init value
self(key)
self._registered_tf_ph_dict[key] = ph
def copy_from(self, source_parameter, deep_copy=None):
if not isinstance(source_parameter, type(self)):
raise TypeError()
super(ParametersWithTensorflowVariable, self).copy_from(source_parameter)
tmp_op_list = []
for t_para, s_para in zip(self._tf_var_list, source_parameter._tf_var_list):
tmp_op_list.append(tf.assign(t_para, s_para))
sess = tf.get_default_session()
sess.run(tmp_op_list)
del tmp_op_list
def _update_dict(self, source_dict: dict, target_dict: dict):
for key, val in source_dict.items():
if isinstance(val, tf.Tensor):
continue
target_dict[key] = val
@typechecked
def set_scheduler(self, param_key: str, scheduler: Scheduler, to_tf_ph_flag=True):
ori_val = self(param_key)
if to_tf_ph_flag is True:
self.to_tf_ph(key=param_key,
ph=tf.placeholder(shape=tuple(np.array(ori_val).shape),
dtype=tf.dtypes.as_dtype(np.array(ori_val).dtype)))
scheduler.initial_p = ori_val
self._scheduler_info_dict[param_key] = dict(param_key=param_key, scheduler=scheduler)
| 9,734 | 44.27907 | 117 | py |
baconian-project | baconian-project-master/baconian/tf/mlp.py | from typeguard import typechecked
from baconian.tf.util import MLPCreator
import tensorflow as tf
import numpy as np
from baconian.tf.tf_parameters import ParametersWithTensorflowVariable
class MLP(object):
@typechecked
def __init__(self,
input_ph: tf.Tensor,
name_scope: str,
net_name: str,
reuse,
mlp_config: list,
input_norm: np.ndarray = None,
output_norm: np.ndarray = None,
output_low: np.ndarray = None,
output_high: np.ndarray = None,
):
self.input_ph = input_ph
self.name_scope = name_scope
self.mlp_config = mlp_config
self.mlp_net_name = net_name
self.net, self.output, self.var_list = MLPCreator.create_network_with_tf_layers(input=input_ph,
reuse=reuse,
network_config=mlp_config,
tf_var_scope=name_scope,
net_name=net_name,
input_norm=input_norm,
output_high=output_high,
output_low=output_low,
output_norm=output_norm)
for var in self.var_list:
assert name_scope in var.name
self._parameters = ParametersWithTensorflowVariable(tf_var_list=self.var_list,
name='parameters_{}'.format(self.mlp_net_name),
rest_parameters=dict())
    def forward(self, input: np.ndarray, sess=None) -> np.ndarray:
        # resolve the session at call time; a default argument of
        # tf.get_default_session() would be evaluated once at import time
        sess = sess if sess is not None else tf.get_default_session()
feed_dict = {
self.input_ph: input,
**self._parameters.return_tf_parameter_feed_dict()
}
res = sess.run(self.output,
feed_dict=feed_dict)
return np.squeeze(res)
def copy_from(self, obj) -> bool:
if not isinstance(obj, type(self)):
raise TypeError('Wrong type of obj %s to be copied, which should be %s' % (type(obj), type(self)))
self._parameters.copy_from(source_parameter=obj._parameters)
return True
def init(self, source_obj=None):
self._parameters.init()
if source_obj:
self.copy_from(obj=source_obj)
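
# Usage sketch (illustrative; the config values are assumptions, see
# MLPCreator for the supported layer keys):
#   x_ph = tf.placeholder(tf.float32, shape=[None, 4])
#   mlp = MLP(input_ph=x_ph, name_scope='demo', net_name='demo_mlp', reuse=False,
#             mlp_config=[dict(TYPE='DENSE', NAME='out', N_UNITS=2, ACT='TANH',
#                              B_INIT_VALUE=0.0)])
#   mlp.init()
#   y = mlp.forward(input=np.zeros((1, 4)), sess=tf.get_default_session())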
| 2,861 | 46.7 | 114 | py |
baconian-project | baconian-project-master/baconian/tf/util.py | import os
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer as contrib_W_init
from typeguard import typechecked
import collections
import multiprocessing
import tensorflow.contrib as tf_contrib
from baconian.common.error import *
__all__ = ['get_tf_collection_var_list', 'MLPCreator']
def get_tf_collection_var_list(scope, key=tf.GraphKeys.GLOBAL_VARIABLES):
var_list = tf.get_collection(key, scope=scope)
return sorted(list(set(var_list)), key=lambda x: x.name)
# def create_new_tf_session(cuda_device: int):
# os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_device)
# tf_config = tf.ConfigProto()
# tf_config.gpu_options.allow_growth = True
# sess = tf.Session(config=tf_config)
# sess.__enter__()
# assert tf.get_default_session()
# return sess
def clip_grad(optimizer, loss, clip_norm: float, var_list):
grad_var_pair = optimizer.compute_gradients(loss=loss, var_list=var_list)
if clip_norm <= 0.0:
raise InappropriateParameterSetting('clip_norm should be larger than 0.0')
grad_var_pair = [(tf.clip_by_norm(grad, clip_norm=clip_norm), var) for
grad, var in grad_var_pair]
grad = [g[0] for g in grad_var_pair]
return grad_var_pair, grad
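
# Usage sketch (illustrative): clip gradients before applying them, assuming
# `optimizer` is a tf.train.Optimizer and `loss`/`var_list` are already built:
#   grad_var_pair, grads = clip_grad(optimizer, loss, clip_norm=5.0, var_list=var_list)
#   train_op = optimizer.apply_gradients(grad_var_pair)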
def create_new_tf_session(**kwargs):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(**kwargs)
sess.__enter__()
assert tf.get_default_session()
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
# os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_device)
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
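
# Usage sketch (illustrative): build a 4-thread, CPU-friendly session and make
# it the default one, so later tf.get_default_session() calls can find it:
#   sess = make_session(num_cpu=4, make_default=True)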
# class TensorInput(object):
# def __init__(self, **kwargs):
# for key, val in kwargs:
# setattr(self, key, val)
class MLPCreator(object):
act_dict = {
'LINEAR': tf.identity,
'RELU': tf.nn.relu,
'LEAKY_RELU': tf.nn.leaky_relu,
'SIGMOID': tf.nn.sigmoid,
'SOFTMAX': tf.nn.softmax,
'IDENTITY': tf.identity,
'TANH': tf.nn.tanh,
'ELU': tf.nn.elu
}
@staticmethod
def create_network_with_tf_layers(input: tf.Tensor, network_config: list, tf_var_scope: str, net_name='',
input_norm=None,
output_norm=None,
reuse=False,
output_low=None, output_high=None):
"""
Create a MLP network with a input tensor
warning: this will create a input net which will cut the gradients from the input tensor and its
previous op
:param input:
:param network_config:
:param net_name:
:param tf_var_scope:
:param input_norm:
:param output_norm:
:param output_low:
:param output_high:
:return:
"""
pre_var_scope_name = tf.get_variable_scope().name
tf_var_scope_context = tf.variable_scope(tf_var_scope)
tf_var_scope_context.__enter__()
if pre_var_scope_name != '':
assert tf.get_variable_scope().name == "{}/{}".format(pre_var_scope_name, tf_var_scope)
else:
assert tf.get_variable_scope().name == "{}".format(tf_var_scope)
if reuse:
tf.get_variable_scope().reuse_variables()
net = input
if input_norm:
net = (net - input_norm[0]) / input_norm[1]
last_layer_act = None
for layer_config in network_config:
if layer_config['TYPE'] == 'DENSE':
if layer_config['B_INIT_VALUE'] is None:
b_init = None
else:
b_init = tf.constant_initializer(value=layer_config['B_INIT_VALUE'])
l1_norm = layer_config['L1_NORM'] if 'L1_NORM' in layer_config else 0.0
l2_norm = layer_config['L2_NORM'] if 'L2_NORM' in layer_config else 0.0
net = tf.layers.dense(inputs=net,
units=layer_config['N_UNITS'],
activation=MLPCreator.act_dict[layer_config['ACT']],
use_bias=b_init is not None,
kernel_initializer=contrib_W_init(),
kernel_regularizer=tf_contrib.layers.l1_l2_regularizer(l1_norm, l2_norm),
bias_regularizer=tf_contrib.layers.l1_l2_regularizer(l1_norm, l2_norm),
bias_initializer=b_init,
name=net_name + '_' + layer_config['NAME'],
reuse=reuse
)
last_layer_act = layer_config['ACT']
if output_norm:
net = (net * output_norm[0]) + output_norm[1]
if output_high is not None and output_low is not None:
if last_layer_act not in ("IDENTITY", 'LINEAR'):
                raise ValueError(
                    'Please set the last layer activation to IDENTITY/LINEAR to use output scaling; TANH will be added to it by default')
net = tf.tanh(net)
net = (net + 1.0) / 2.0 * (output_high - output_low) + output_low
        # todo: the collection may contain extra variables that were instantiated by others but share the same name scope
net_all_params = get_tf_collection_var_list(key=tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_variable_scope().name)
if tf_var_scope_context is not None:
tf_var_scope_context.__exit__(type_arg=None, value_arg=None, traceback_arg=None)
assert tf.get_variable_scope().name == pre_var_scope_name
return net, net, net_all_params
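
# Illustrative network_config (inferred from the keys read above; the concrete
# values are assumptions): every layer dict needs TYPE / NAME / N_UNITS / ACT /
# B_INIT_VALUE, with optional L1_NORM and L2_NORM regularization weights.
#   network_config = [
#       dict(TYPE='DENSE', NAME='h1', N_UNITS=64, ACT='RELU', B_INIT_VALUE=0.0),
#       dict(TYPE='DENSE', NAME='out', N_UNITS=4, ACT='LINEAR', B_INIT_VALUE=0.0),
#   ]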
| 6,416 | 40.941176 | 132 | py |
baconian-project | baconian-project-master/baconian/tf/__init__.py | 0 | 0 | 0 | py |
|
baconian-project | baconian-project-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser
}
# -- Project information -----------------------------------------------------
project = 'Baconian'
copyright = '2019, Linsen Dong'
author = 'Linsen Dong'
from baconian.version import __version__
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# googleanalytics_id = 'UA-138017644-1'
# googleanalytics_enabled = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst']
# source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'baconian'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'baconian.tex', 'baconian Documentation',
'Linsen Dong', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'baconian', 'A unified and light-weight model-based reinforcement learning toolbox',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Baconian', 'Baconian Documentation',
author, 'Baconian', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
autodoc_default_options = {
'special-members': '__init__',
'undoc-members': False,
'exclude-members': '__weakref__'
}
autodoc_inherit_docstrings = False
autodoc_mock_imports = ['mujoco_env', 'gym.envs.mujoco', 'pybullet', 'tensorflow',
'tensorflow-probability', 'scipy', 'opencv-python', 'scikit-learn', 'pandas']
| 6,083 | 28.970443 | 101 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_slot_filling_evaluation.py | # coding:utf-8
import argparse
import json
from source.Evaluate.slot_filling import prepare_data_to_dukehan, prepare_data_to_conll_format
from source.Evaluate.slot_filling_data_processor import cook_slot_filling_data
from set_config import refresh_config_file
import copy
import subprocess
# ============ Args Process ==========
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task", type=str, default='weather_labeled', help="choose task: atis_labeled, navigate_labeled, schedule_labeled, weather_labeled, navigate, schedule, weather")
parser.add_argument("-gd", "--gen_data", type=str, default='none', help="generate slot_filling data to Xiaoming's model or DukeHan's: xiaoming, dukehan")
parser.add_argument("-cd", "--cook_data", action='store_true', help="cook data for Xiaoming's model")
parser.add_argument("-trn", "--train", action='store_true', help="train Xiaoming's slot filling model")
parser.add_argument("-rfo", "--refill_only", action='store_true', help="generate data for refill only data")
parser.add_argument("--topx", type=int, default=5, help="select using topx of generated sentences")
parser.add_argument("-s", "--seed", type=int, default=100, help="select random seed")
# parser.add_argument("-bs", "--batch_size", type=int, default=16, help="select batch size")
parser.add_argument("-sr", "--split_rate", type=float, default=0.1, help="select different train set size")
parser.add_argument("-cm", "--cluster_method", type=str, default='_intent', help="choose cluster method: '_intent', '_slot', '_intent_slot'")
parser.add_argument("-et", "--extend", action='store_true', help="choose whether use expand train data")
# Deep Customize
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
def dress_param_with_config(param_config, replace_table):
# replace "data_mark, data_dir, result_dir" slots in param
ret = copy.deepcopy(param_config)
for key in ret:
param_str = " ".join(ret[key])
for slot_name in replace_table:
param_str = param_str.replace(slot_name, replace_table[slot_name])
param_lst = param_str.split()
ret[key] = param_lst
return ret
def run_slot_filling(task_name, param):
print('========================== Call BI-LSTM for: %s ==========================' % task_name)
print('========================== Param ==========================\n%s' % ' '.join(param))
print('========================== BI-LSTM Output ========================== \n')
proc = subprocess.Popen(param, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
with open('log/' + 'slot-filling' + task_name + 'log', 'w') as writer:
for line in proc.stdout.readlines():
print(line)
writer.write(line.rstrip().decode("utf-8") + '\n')
if b'error' in line.lower() and b'check_error' not in line.lower():
raise RuntimeError
if __name__ == "__main__":
print('debug:', args.task)
if args.gen_data == 'dukehan':
for split_rate in CONFIG['experiment']['train_set_split_rate']:
prepare_data_to_dukehan(CONFIG, args.task, split_rate, use_topx=10)
elif args.gen_data == 'xiaoming':
for split_rate in CONFIG['experiment']['train_set_split_rate']:
# for cluster_method in CONFIG['experiment']['cluster_method']:
# for split_rate in [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.08, 0.1, 0.2, 0.5, 1]:
# for cluster_method in ['_intent', '_slot']:
for cluster_method in ['_intent-slot']:
prepare_data_to_conll_format(CONFIG, args.task, split_rate, cluster_method, use_topx=args.topx, refilled_only=args.refill_only)
else:
print("Wrong args!")
    if args.cook_data:
        # placeholder: data cooking is not implemented in this script
        print("")
    if args.train:
        # placeholder: training is not implemented in this script
        print("")
| 4,041 | 47.119048 | 188 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_clustering.py | # coding:utf-8
from source.Cluster import clustering
from source.Cluster import conll_format_clustering
# from source.Cluster.clustering import slot_clustering_and_dump_dict
import argparse
import json
from set_config import refresh_config_file
# ============ Args Process ==========
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data", type=str, default='stanford_labeled', help="choose target dataset: stanford, stanford_labeled, atis")
parser.add_argument("-cm", "--cluster_mode", type=str, default='all', help="select cluster mode: slot, intent, slot-intent, all, no_clustering")
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
def run_clustering():
if args.data == "stanford":
clustering.slot_clustering_and_dump_dict(config=CONFIG, train_set_split_rate_lst=CONFIG['experiment']['train_set_split_rate'])
elif args.data == "stanford_labeled":
conll_format_clustering.clustering_and_dump_dict(
data_dir=CONFIG['path']['RawData']['stanford_labeled'],
config=CONFIG,
cluster_mode=args.cluster_mode,
train_set_split_rate_lst=CONFIG['experiment']['train_set_split_rate'])
elif args.data == 'atis':
conll_format_clustering.clustering_and_dump_dict(
data_dir=CONFIG['path']['RawData']['atis'],
config=CONFIG,
cluster_mode=args.cluster_mode,
train_set_split_rate_lst=CONFIG['experiment']['train_set_split_rate'])
else:
print("Error: Wrong dataset args.")
if __name__ == "__main__":
run_clustering()
| 1,821 | 38.608696 | 144 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_onmt_generation.py | # coding:utf-8
import json
import os
import copy
import subprocess
import argparse
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer, sentence_edit_distance
from source.ReFilling.re_filling import re_filling
import math
from collections import Counter
import random
from itertools import combinations
from set_config import refresh_config_file
# ============ Description ==========
# append source id in the end, and use tree bank tokenize
# ============ Args Process ==========
# General function
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task", type=str, default='atis_labeled', help="choose task: atis_labeled, atis_baseline, navigate_labeled, schedule_labeled, weather_labeled, navigate, schedule, weather")
parser.add_argument("-gd", "--gen_data", help="generate data from cluster result", action="store_true")
parser.add_argument("-cd", "--cook_data", help="cook data for ONMT", action="store_true")
parser.add_argument("-trn", "--train", help="run train part", action="store_true")
parser.add_argument("-tst", "--test", help="run test part", action="store_true")
parser.add_argument("-rf", "--refill", help="run surface realization", action="store_true")
parser.add_argument("-f", "--full", help="run all part", action="store_true")
# Deep Customize
parser.add_argument("-pm", "--pair_mode", type=str, default='diverse_connect', help='choose mode: "full_connect", "circle", "diverse_connect","random"')
parser.add_argument("-fr", "--filter_rate", type=float, default=0.5, help='choose filtering rate in "diverse_connect" pairing, set 1 to keep all')
parser.add_argument("-ni", "--no_index", action='store_true', help='use if do not want to use index embedding ')
parser.add_argument("-nc", "--no_clustering", action='store_true', help='use if do not want to use clustered data')
# parser.add_argument("-mm", "--model_mark", type=str, default=default_model, help='select model by mark here, acc_XXXX_ppl_XXXX')
# parser.add_argument("-mm", "--model_mark", type=str, default='acc_81.25_ppl_1.76_e13', help='select model by mark here, acc_XXXX_ppl_XXXX')
parser.add_argument("-gpu", "--gpu_id", type=int, default=0, help='input gpu id for current task, -1 to not use gpu')
parser.add_argument("-et", "--expand_target", type=str, default='train', help='select target set for expanding: "test", "train"')
parser.add_argument("-svt", "--slot_value_table", type=str, default='train', help='select use which slot value table: "fill", "train"')
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
TASK_NAME = args.task
GEN_DATA = args.gen_data # or args.full
COOK_DATA = args.cook_data or args.full
RUN_TRAIN = args.train or args.full
RUN_TEST = args.test or args.full
RUN_REFILL = args.refill or args.full
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
def remove_dup_pair(all_pairs):
# =========== wait to move into source for better structure ============
    # there may still be duplicate pairs
non_dup = {}
dup_num = 0
for p in all_pairs:
key = p[0] + '|||' + p[1]
if key in non_dup:
dup_num += 1
else:
non_dup[key] = p
return non_dup.values(), dup_num
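
# Illustrative behaviour (added for clarity): order within a pair matters, so
# only exact duplicates are dropped:
#   remove_dup_pair([['a', 'b'], ['a', 'b'], ['b', 'a']])
#   -> (pairs [['a', 'b'], ['b', 'a']], dup_num 1)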
def diverse_score(s, t):
"""
calculate pairing score
:param s: target str
:param t: candidate str
:return: score, edit distance, length penalty
"""
lst_s = s.split()
lst_t = t.split()
length_penalty = math.exp(-abs((len(lst_s) - len(lst_t))/len(lst_s)))
# length_penalty = math.exp(-abs((len(lst_s) - len(lst_t))/max(len(lst_s), len(lst_t))))
e_d = sentence_edit_distance(lst_t, lst_s)
# print(e_d * length_penalty, e_d, length_penalty, '\n', s, '\n', t)
return e_d * length_penalty
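
# Worked example (added for clarity): for s = 'a b c' and t = 'a b', the
# length penalty is exp(-|3 - 2| / 3) ~= 0.72 and the edit distance is 1,
# giving a diversity score of ~= 0.72; longer and more different candidates
# score higher and are preferred as pairing targets.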
def get_pairs_within_cluster(all_user_temp, mode="full_connect", cov_p=0.5, append_index=True):
"""
pair utterance within a cluster
:param all_user_temp: a list of utterance of same cluster
:param mode: different mechanism for pairing: "full connect", "circle", "diverse_connect"
:param cov_p: a float as percentage, used in diverse_connect, determine number of connections
:return: paired utterance, pack in list: [[u1, p2], [u3, u4]]
"""
ret = []
if mode == "full_connect":
for comb in combinations(all_user_temp, 2):
ret.append(comb) # use combination to avoid self to self pairs
ret.append(comb[::-1]) # to get reverse of it
elif mode == "circle":
# pair each utterance with next one
for i in range(-1, len(all_user_temp) - 1):
ret.append([all_user_temp[i], all_user_temp[i + 1]])
elif mode == 'diverse_connect':
# pair each utterance with the most different x% utterance
top_x = int(len(all_user_temp) * cov_p)
expand_count = 0
for temp in all_user_temp:
top_diversity_set = sorted(all_user_temp, key=lambda x: diverse_score(temp, x), reverse=True)
top_diversity_set = top_diversity_set[:min(top_x + 1, len(all_user_temp))]
for ind, cand in enumerate(top_diversity_set):
append_word = ' <%d>' % ind if append_index else ''
ret.append([temp + append_word, cand])
expand_count += len(top_diversity_set)
# print('--- debug:', len(all_user_temp), expand_count)
elif mode == 'random':
random_target_size = 10
for u in all_user_temp:
for i in range(random_target_size):
random_target = random.choice(all_user_temp)
ret.append([u, random_target])
return ret
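
# Illustrative output (added for clarity) for ['u1', 'u2', 'u3'] in "circle"
# mode, where each utterance is paired with its successor (wrapping around):
#   [['u3', 'u1'], ['u1', 'u2'], ['u2', 'u3']]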
def generate_data(task_name, pair_mode='diverse_connect', append_index=True, no_clustering=False, filter_rate=0.5):
# =========== this function will move into source.prepare_data for better structure ============
onmt_data_path = CONFIG['path']['OnmtData'] + 'SourceData/'
raw_data_path = CONFIG['path']['ClusteringResult']
if no_clustering:
# all_file = list(filter(lambda x: '.json' in x and '_nc' in x, os.listdir(raw_data_path)))
all_file = list(filter(lambda x: '.json' in x and '_nc' in x and 'atis' in x, os.listdir(raw_data_path)))
else:
# all_file = list(filter(lambda x: '.json' in x and '_nc' not in x, os.listdir(raw_data_path)))
all_file = list(filter(lambda x: '.json' in x and '_nc' not in x and 'atis' in x, os.listdir(raw_data_path)))
if not os.path.isdir(onmt_data_path):
os.makedirs(onmt_data_path)
pair_mode_str = '' if pair_mode == 'diverse_connect' else '_' + pair_mode
no_index_str = '' if append_index else '_ni'
no_filtering_str = '' if filter_rate < 1 else '_nf'
for f in all_file:
f_mark = f.replace(".json", '')
print('=== Start to gen-data for: %s === ' % f_mark)
tmp_pair_mode = pair_mode
# tmp_pair_mode = 'circle' if 'test' in f_mark else pair_mode # no need to have multi-source in source
with open(raw_data_path + f, 'r') as reader, \
open(onmt_data_path + f_mark + pair_mode_str + no_index_str + no_filtering_str + '_tgt.txt', 'w') as tgt_writer, \
open(onmt_data_path + f_mark + pair_mode_str + no_index_str + no_filtering_str + '_src.txt', 'w') as src_writer:
json_data = json.load(reader)
all_pairs = []
dup_num, null_num, bad_num = 0, 0, 0
bad_cluster_num = 0
for cluster_item in json_data.values():
all_user_temp = []
# ======== remove dup user-templates in same cluster ========
raw_all_user_temp = [item["user_temp"] for item in cluster_item]
non_dup_all_user_temp = set(raw_all_user_temp)
# ======== remove clusters with no pairs =======
if len(non_dup_all_user_temp) < 2:
bad_cluster_num += 1
continue
dup_num += (len(raw_all_user_temp) - len(non_dup_all_user_temp))
print("Cluster size change: remove duplicate",
len(raw_all_user_temp), len(non_dup_all_user_temp))
# ======= filter temps by simple rule ======
for user_temp in non_dup_all_user_temp:
# Commented, as in fact there is no reason to do such filtering
# # remove "thanks" included meaningless utterance and bad case
# if (len(user_temp.split()) <= 7 and ('thank' in user_temp or 'thanks' in user_temp)) or \
# ('no response needed' in user_temp):
# bad_num += 1
# continue
# ====== fix null utterance bug =====
if user_temp.strip() == '':
null_num += 1
continue
all_user_temp.append(user_temp)
# ========= pair utterance in current cluster, store result into all pairs ========
all_pairs.extend(get_pairs_within_cluster(all_user_temp, mode=tmp_pair_mode, cov_p=filter_rate, append_index=append_index))
# ======== remove duplicated pairs to avoid unbalanced data ========
filtered_all_pairs, pair_dup_num = remove_dup_pair(all_pairs)
            print('%d dup temps, %d null temps, %d bad temps, %d dup pairs, %d bad clusters' % (
                dup_num, null_num, bad_num, pair_dup_num, bad_cluster_num))
for ind, p in enumerate(filtered_all_pairs):
tgt_writer.write(' '.join(low_case_tokenizer(p[1])) + '\n')
src_writer.write(' '.join(low_case_tokenizer(p[0])) + '\n')
if ind % 10000 == 0:
print(ind, 'pairs processed')
def refill_template(task, target_file, split_rate, slot_value_table):
re_filling(CONFIG, task=task, target_file_name=target_file, split_rate=split_rate, slot_value_table=slot_value_table)
def dress_param_with_config(param_config, replace_table):
# replace "data_mark, data_dir, result_dir" slots in param
ret = copy.deepcopy(param_config)
for key in ret:
param_str = " ".join(ret[key])
for slot_name in replace_table:
param_str = param_str.replace(slot_name, replace_table[slot_name])
param_lst = param_str.split()
ret[key] = param_lst
return ret
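
# Illustrative behaviour (added for clarity): every <SLOT> marker in the
# configured command line is replaced before the command is launched, e.g.
#   dress_param_with_config({'train': ['-data', '<DATA_DIR>/x']}, {'<DATA_DIR>': '/tmp'})
#   -> {'train': ['-data', '/tmp/x']}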
def call_onmt(task_name, param):
print('========================== Call Onmt for: %s ==========================' % task_name)
print('========================== Param ==========================\n%s' % ' '.join(param))
print('========================== Open-NMT Output ========================== \n')
proc = subprocess.Popen(param, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
with open('log/' + task_name + 'log', 'w') as writer:
for line in proc.stdout.readlines():
print(line)
writer.write(line.rstrip().decode("utf-8") + '\n')
if b'error' in line.lower() and b'check_error' not in line.lower():
raise RuntimeError
def main():
if GEN_DATA:
generate_data(
task_name=TASK_NAME,
pair_mode=args.pair_mode,
append_index=(not args.no_index),
no_clustering=args.no_clustering,
filter_rate=args.filter_rate
)
# generate_full_pair_data()
no_index_str = '_ni' if args.no_index else ''
pair_mod_str = '' if args.pair_mode == 'diverse_connect' else '_' + args.pair_mode
no_filtering_str = '' if args.filter_rate < 1 else '_nf'
all_cluster_method = ['_nc'] if args.no_clustering else ['_leak-gan']
# all_cluster_method = ['_nc'] if args.no_clustering else ['_intent-slot']
# all_cluster_method = ['_nc'] if args.no_clustering else CONFIG['experiment']['cluster_method']
# for split_rate in CONFIG['experiment']['train_set_split_rate']:
for split_rate in [4478]:
for cluster_method in all_cluster_method:
# Customize these parameters for OpenNMT tool
param_replace_table = {
'<DATA_MARK>': TASK_NAME,
'<DATA_DIR>': CONFIG['path']['OnmtData'] + 'SourceData',
'<RESULT_DIR>': CONFIG['path']['OnmtData'] + 'Result',
# '<MODEL_MARK>': args.model_mark,
'<PAIR_MOD>': pair_mod_str,
'<NO_INDEX>': no_index_str,
'<NO_FILTERING>': no_filtering_str,
'<GPU>': '' if args.gpu_id < 0 else ('-gpu %d' % args.gpu_id),
'<EXPAND_TGT>': args.expand_target,
'<SPLIT_RATE>': str(split_rate),
'<CLUSTER_METHOD>': cluster_method,
}
print('Debug', param_replace_table)
param_config = dress_param_with_config(CONFIG['onmt'], param_replace_table)
if COOK_DATA:
# to get word embedding and dict
call_onmt('prepare_data' + TASK_NAME + cluster_method + str(split_rate) + pair_mod_str + no_index_str + no_filtering_str, param_config['prepare_data'])
if RUN_TRAIN:
call_onmt('train' + TASK_NAME + cluster_method + str(split_rate) + pair_mod_str + no_index_str + no_filtering_str, param_config['train'])
if RUN_TEST:
call_onmt('test' + TASK_NAME + cluster_method + str(split_rate) + pair_mod_str + no_index_str + no_filtering_str, param_config['test'])
if RUN_REFILL:
refill_template(TASK_NAME, TASK_NAME + cluster_method + str(split_rate) + pair_mod_str + no_index_str + no_filtering_str + '_pred.txt', split_rate, args.slot_value_table)
if __name__ == "__main__":
main()
    print('Notice! task option does NOT affect gd, but does affect: cd, trn, tst.')
# print("!!!!!!!!!!!!!!!!!! run in debug mode !!!!!!!!!!!!!!")
print('Warn! Turn off NOISE_TRANLATE in onmt\'s translator.py')
| 14,067 | 48.886525 | 200 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_thesaurus.py | # coding: utf-8
import json
import argparse
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer, sentence_edit_distance
from source.ReFilling.re_filling import re_filling
from set_config import refresh_config_file
# ============ Description ==========
# refill source file to test refill only
# ============ Args Process ==========
# General function
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task", type=str, default='navigate_labeled', help="choose task: atis_labeled, navigate_labeled, schedule_labeled, weather_labeled, navigate, schedule, weather")
parser.add_argument("-rf", "--refill", help="run surface realization", action="store_true")
parser.add_argument("-f", "--full", help="run all part", action="store_true")
parser.add_argument("-svt", "--slot_value_table", type=str, default='train', help='select use which slot value table: "fill", "train"')
# Deep Customize
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
TASK_NAME = args.task
RUN_REFILL = args.refill or args.full
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
def refill_source_template(task, target_file, slot_value_table, split_rate):
re_filling(CONFIG, task=task, target_file_name=target_file, split_rate=split_rate, slot_value_table=slot_value_table, refill_only=True)
if __name__ == '__main__':
# for split_rate in [0.005]:
# for cluster_method in ['_intent-slot']:
for split_rate in CONFIG['experiment']['train_set_split_rate']:
for cluster_method in ['_intent-slot']:
# for cluster_method in CONFIG['experiment']['cluster_method']:
if RUN_REFILL:
print('Start to refill for', 'train_' + TASK_NAME + cluster_method + str(split_rate) + '_src.txt')
refill_source_template(TASK_NAME, 'train_' + TASK_NAME + cluster_method + str(split_rate) + '_src.txt', args.slot_value_table, split_rate)
| 2,090 | 44.456522 | 189 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_aug_baseline_slot_filling_for.py | # coding:utf-8
import argparse
import json
from source.Evaluate.slot_filling import prepare_data_to_dukehan, prepare_data_to_conll_format
from source.Evaluate.slot_filling_data_processor import cook_slot_filling_data
from set_config import refresh_config_file
import copy
import subprocess
# ============ Args Process ==========
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task", type=str, default='atis_labeled', help="choose task: atis_labeled, navigate_labeled, schedule_labeled, weather_labeled, navigate, schedule, weather")
parser.add_argument("-gd", "--gen_data", type=str, default='xiaoming', help="generate slot_filling data to Xiaoming's model or DukeHan's: xiaoming, dukehan")
parser.add_argument("-cd", "--cook_data", action='store_true', help="cook data for Xiaoming's model")
parser.add_argument("-trn", "--train", action='store_true', help="train Xiaoming's slot filling model")
parser.add_argument("-rfo", "--refill_only", action='store_true', help="generate data for refill only data")
parser.add_argument("--topx", type=int, default=10, help="select using topx of generated sentences")
parser.add_argument("-s", "--seed", type=int, default=100, help="select random seed")
# parser.add_argument("-bs", "--batch_size", type=int, default=16, help="select batch size")
parser.add_argument("-sr", "--split_rate", type=float, default=0.1, help="select different train set size")
parser.add_argument("-cm", "--cluster_method", type=str, default='_intent', help="choose cluster method: '_intent', '_slot', '_intent_slot'")
parser.add_argument("-et", "--extend", action='store_true', help="choose whether use expand train data")
# Deep Customize
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
if __name__ == "__main__":
print('debug:', args.task)
if args.gen_data == 'xiaoming':
for split_rate in [4478]:
# for split_rate in CONFIG['experiment']['train_set_split_rate']:
prepare_data_to_conll_format(CONFIG, args.task, split_rate, '_leak-gan', use_topx=args.topx, refilled_only=args.refill_only, pair_mod='', no_index='', no_filter_str='')
# prepare_data_to_conll_format(CONFIG, args.task, split_rate, '_intent-slot', use_topx=args.topx, refilled_only=args.refill_only, pair_mod='', no_index='', no_filter_str='_nf')
# prepare_data_to_conll_format(CONFIG, args.task, split_rate, '_intent-slot', use_topx=args.topx, refilled_only=args.refill_only, pair_mod='_full_connect', no_index='_ni')
# prepare_data_to_conll_format(CONFIG, args.task, split_rate, '_intent-slot', use_topx=args.topx, refilled_only=args.refill_only, pair_mod='', no_index='_ni')
# prepare_data_to_conll_format(CONFIG, args.task, split_rate, '_nc', use_topx=args.topx, refilled_only=args.refill_only, pair_mod='_circle', no_index='_ni')
else:
print('Warning: not generating any data')
# if args.cook_data:
# cook_slot_filling_data(config=CONFIG, task_name=args.task, refill_only=args.refill_only)
#
# if args.train:
#
# # for split_rate in [0.08, 0.1, 0.2, 0.5, 1]:
# # for cluster_method in CONFIG['experiment']['cluster_method']:
# # for cluster_method in ['_intent', '_slot']:
# for cluster_method in ['_intent-slot']:
# param_replace_table = {
# '<TASK_NAME>': args.task,
# '<EXTEND>': '-et' if args.extend else '',
# '<REFILL_ONLY>': '-rfo' if args.refill_only else '',
# '<SPLIT_RATE>': str(split_rate),
# '<CLUSTER_METHOD>': cluster_method,
# '<SEED>': str(args.seed),
# }
# print('Debug', param_replace_table)
# param_config = dress_param_with_config(CONFIG['slot_filling'], param_replace_table)
# run_slot_filling(f"{args.task}{cluster_method}{split_rate}{'-et' if args.extend else ''}{'-rfo' if args.refill_only else ''}", param_config['train_and_test'])
# # if not args.extend:
# break # ori training set is irrelevant to cluster method, so only run it once for each rate
    print('Notice! task option affects all: gd, cd, trn! \nNotice! Extend option will only affect train & test\nrfo affects all')
# print("!!!!!!!!!!!!!!!!!! run in debug mode !!!!!!!!!!!!!!")
print('Use rfo in full model!!!!!')
# print('Not use rfo in full model!!!!!')
print('Notice: rfo results came from thesaurus method!')
| 4,786 | 59.594937 | 188 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_gen_with_label.py | # coding:utf-8
import json
import os
import copy
import subprocess
import argparse
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer, sentence_edit_distance
from source.ReFilling.re_filling import re_filling
import math
from collections import Counter
import random
from itertools import combinations
from set_config import refresh_config_file
# ============ Description ==========
# append source id in the end, and use tree bank tokenize
# ============ Args Process ==========
# General function
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--task", type=str, default='atis_former', help="choose task: atis_former")
parser.add_argument("-gd", "--gen_data", help="generate data from cluster result", action="store_true")
parser.add_argument("-cd", "--cook_data", help="cook data for ONMT", action="store_true")
parser.add_argument("-trn", "--train", help="run train part", action="store_true")
parser.add_argument("-tst", "--test", help="run test part", action="store_true")
parser.add_argument("-gcd", "--gen_conll_data", help="convert generated data to conll format", action="store_true")
parser.add_argument("-f", "--full", help="run all part", action="store_true")
# Deep Customize
parser.add_argument("-gpu", "--gpu_id", type=int, default=0, help='input gpu id for current task, -1 to not use gpu')
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
TASK_NAME = args.task
GEN_DATA = args.gen_data # or args.full
COOK_DATA = args.cook_data or args.full
RUN_TRAIN = args.train or args.full
RUN_TEST = args.test or args.full
GEN_CONLL = args.gen_conll_data or args.full
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
def get_file_tail(task_name, split_rate):
return f"{task_name}-{split_rate}"
def generate_data(input_file_path, output_src_file_path, output_tgt_file_path):
with open(input_file_path, 'r') as reader, \
open(output_src_file_path, 'w') as src_writer, \
open(output_tgt_file_path, 'w') as tgt_writer:
json_data = json.load(reader)
all_pairs = []
for cluster_item in json_data.values():
for turn in cluster_item:
temp_source_word_lst = turn['user_word_lst']
temp_target_word_lst = []
for i in range(len(temp_source_word_lst)):
temp_target_word_lst.append(turn['user_word_lst'][i] + '<' + turn['label_lst'][i] + '>')
all_pairs.append([temp_source_word_lst, temp_target_word_lst])
        print('Input:\n%s\nOutput to:\n%s\n%s\n' % (input_file_path, output_src_file_path, output_tgt_file_path))
for ind, pair in enumerate(all_pairs):
src_writer.write(' '.join(pair[0]) + '\n')
tgt_writer.write(' '.join(pair[1]) + '\n')
if ind % 1000 == 0:
print(ind, 'pairs writen')
print(len(all_pairs), 'pairs in total.')
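
# Illustrative pair produced above (the labels are assumptions): for the
# utterance "flights to boston" with labels [O, O, B-toloc.city_name], the
# source line is
#   flights to boston
# and the target line is
#   flights<O> to<O> boston<B-toloc.city_name>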
def convert_to_conll(origin_file, input_file_path, output_file_path):
with open(origin_file, 'r')as ori_file, open(input_file_path, 'r') as reader, open(output_file_path, 'w') as writer:
appeared_line_set = set()
json_data = json.load(ori_file)
for cluster_item in json_data.values():
for turn in cluster_item:
temp_source_word_lst = turn['user_word_lst']
temp_target_word_lst = []
for i in range(len(temp_source_word_lst)):
temp_target_word_lst.append(turn['user_word_lst'][i] + '<' + turn['label_lst'][i] + '>')
appeared_line_set.add(' '.join(temp_target_word_lst))
for line in reader:
appeared_line_set.add(line.replace('\n', ''))
for line in appeared_line_set:
word_label_lst = line.split()
for word_label in word_label_lst:
# print('debug', word_label)
# print('debug', word_label_lst)
if '<unk>' in word_label:
word = 'unk'
label = word_label.split('><')[1].replace('>', '')
else:
word, label = word_label.split('<')
label = label.replace('>', '')
writer.write('%s\t%s\n' % (word, label))
writer.write('\n')
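
# Illustrative conversion (added for clarity): a generated line such as
# "flights<O> to<O> boston<B-toloc.city_name>" becomes one tab-separated
# token/label pair per line, with a blank line between utterances:
#   flights	O
#   to	O
#   boston	B-toloc.city_name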
def dress_param_with_config(param_config, replace_table):
# replace "data_mark, data_dir, result_dir" slots in param
ret = copy.deepcopy(param_config)
for key in ret:
param_str = " ".join(ret[key])
for slot_name in replace_table:
param_str = param_str.replace(slot_name, replace_table[slot_name])
param_lst = param_str.split()
ret[key] = param_lst
return ret
def call_onmt(task_name, param):
print('========================== Call Onmt for: %s ==========================' % task_name)
print('========================== Param ==========================\n%s' % ' '.join(param))
print('========================== Open-NMT Output ========================== \n')
proc = subprocess.Popen(param, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
with open('log/' + task_name + 'log', 'w') as writer:
has_error = False
for line in proc.stdout.readlines():
print(line)
writer.write(line.rstrip().decode("utf-8") + '\n')
if b'error' in line.lower() and b'check_error' not in line.lower():
has_error = True
if has_error:
raise RuntimeError
def main():
all_split_rate = CONFIG['experiment']['train_set_split_rate']
if GEN_DATA:
clustering_results_dir = CONFIG['path']['ClusteringResult']
onmt_source_dir = CONFIG['path']['OnmtData'] + 'SourceData/'
# gen train data
for split_rate in all_split_rate:
input_file_path = clustering_results_dir + f'train_atis_labeled_intent-slot{split_rate}.json'
output_src_file_path = onmt_source_dir + 'train_' + get_file_tail(TASK_NAME, split_rate) + '_src.txt'
output_tgt_file_path = onmt_source_dir + 'train_' + get_file_tail(TASK_NAME, split_rate) + '_tgt.txt'
generate_data(input_file_path,output_src_file_path, output_tgt_file_path)
# gen dev data
input_file_path_for_dev = clustering_results_dir + 'dev_atis_labeled_intent-slot1.json'
output_src_file_path_for_dev = onmt_source_dir + 'dev_' + get_file_tail(TASK_NAME, 1) + '_src.txt'
output_tgt_file_path_for_dev = onmt_source_dir + 'dev_' + get_file_tail(TASK_NAME, 1) + '_tgt.txt'
generate_data(input_file_path_for_dev, output_src_file_path_for_dev, output_tgt_file_path_for_dev)
# gen test data
input_file_path_for_test = clustering_results_dir + 'dev_atis_labeled_intent-slot1.json'
output_src_file_path_for_test = onmt_source_dir + 'test_' + get_file_tail(TASK_NAME, 1) + '_src.txt'
output_tgt_file_path_for_test = onmt_source_dir + 'test_' + get_file_tail(TASK_NAME, 1) + '_tgt.txt'
generate_data(input_file_path_for_test, output_src_file_path_for_test, output_tgt_file_path_for_test)
for split_rate in all_split_rate:
# Customize these parameters for OpenNMT tool
train_file_tail = get_file_tail(task_name=TASK_NAME, split_rate=split_rate)
dev_file_tail = get_file_tail(task_name=TASK_NAME, split_rate=1)
param_replace_table = {
'<DATA_DIR>': CONFIG['path']['OnmtData'] + 'SourceData',
'<RESULT_DIR>': CONFIG['path']['OnmtData'] + 'Result',
'<TRAIN_FILE_TAIL>': train_file_tail,
'<DEV_FILE_TAIL>': dev_file_tail,
'<GPU>': '' if args.gpu_id < 0 else ('-gpu %d' % args.gpu_id),
}
print('Debug', param_replace_table)
param_config = dress_param_with_config(CONFIG['gen_with_label'], param_replace_table)
if COOK_DATA:
# to get word embedding and dict
call_onmt('prepare_data: ' + train_file_tail, param_config['prepare_data'])
if RUN_TRAIN:
call_onmt('train: ' + train_file_tail, param_config['train'])
if RUN_TEST:
call_onmt('test: ' + train_file_tail, param_config['test'])
if GEN_CONLL:
origin_file_path = CONFIG['path']['ClusteringResult'] + f'train_atis_labeled_intent-slot{split_rate}.json'
input_file_path = CONFIG['path']['OnmtData'] + 'Result/' + train_file_tail + '_pred.txt'
output_file_path = CONFIG['path']['Evaluate'] + 'SlotFilling/Source/' + 'extend_train_' + train_file_tail + '.conll'
convert_to_conll(origin_file_path, input_file_path, output_file_path)
if __name__ == "__main__":
main()
print('Warn! Turn on NOISE_TRANLATE in onmt\'s translator.py')
| 8,920 | 46.452128 | 128 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/set_config.py | # coding:utf-8
from source.AuxiliaryTools.ConfigTool import update_config
def refresh_config_file(config_path='./config.json'):
print('Config Position:', config_path)
# For my linux server setting
update_config("/users4/ythou/Projects/TaskOrientedDialogue/data/", config_path=config_path)
# For my windows setting
# update_config("E:/Projects/Research/TaskOrientedDialogue/data/", config_path='./config.json')
if __name__ == "__main__":
refresh_config_file()
| 486 | 33.785714 | 99 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/run_gen_evaluation.py | # coding: utf-8
import argparse
import json
from source.Evaluate.gen_eval import appearance_check
from set_config import refresh_config_file
import copy
import subprocess
from multiprocessing import Process, Queue, current_process, freeze_support, Manager
N_THREAD = 20
# ============ Args Process ==========
parser = argparse.ArgumentParser()
# parser.add_argument("-t", "--task", type=str, default='weather_labeled', help="choose task: navigate_labeled, schedule_labeled, weather_labeled, navigate, schedule, weather")
# parser.add_argument("--topx", type=int, default=5, help="select using topx of generated sentences")
# Deep Customize
parser.add_argument('--config', default='./config.json', help="specific a config file by path")
args = parser.parse_args()
# ============ Refresh Config ==========
refresh_config_file(args.config)
# ============ Settings ==========
with open(args.config, 'r') as con_f:
CONFIG = json.load(con_f)
TASK_NAME_LST = ['atis_labeled']
# TASK_NAME_LST = ['navigate_labeled', 'schedule_labeled', 'weather_labeled']
CLUSTER_METHOD_LST = ['_intent-slot', '_nc', '_leak-gan']
# CLUSTER_METHOD_LST = CONFIG['experiment']['cluster_method'] + ['_nc']
# SPLIT_RATE_LST = [1]
SPLIT_RATE_LST = [4478]
# SPLIT_RATE_LST = [515]
PAIRING_MODE_LST = ['', '_full_connect', '_circle', '_random']
INDEX_CHOICE_LST = ['', '_ni', '_nf']
TOP_X = 10
USE_METRIC = [
# "Not Appeared",
# "Total",
# "Unique",
"Unique New",
"Avg. Distance for new",
"Avg. Distance for augmented",
"Avg. Distance for source",
# 'Avg. Length',
'source_distinct_1',
'source_distinct_2',
'source_unigram',
'source_bigram',
'source_total_word',
'augmented_distinct_1',
'augmented_distinct_2',
'augmented_unigram',
'augmented_bigram',
'augmented_total_word',
'source_size',
'generated_new_size',
'augmented_size',
]
def get_file_tail(task_name, cluster_method, split_rate, pairing_mod, index_choice):
file_tail = f"{task_name}{cluster_method}{str(split_rate)}{pairing_mod}{index_choice}"
return file_tail
def gen_evaluation_thread(task_queue, done_queue):
for param in iter(task_queue.get, 'STOP'):
file_tail = get_file_tail(** param)
ret = copy.copy(param)
ret['eval_result'] = appearance_check(
result_file=CONFIG['path']["OnmtData"] + "Result/" + file_tail + '_gen_eval.log',
test_what_file=CONFIG['path']["OnmtData"] + "Result/" + file_tail + '_pred.txt',
in_what_file=CONFIG['path']["OnmtData"] + "SourceData/train_" + file_tail + '_src.txt',
top_x=TOP_X
)
done_queue.put(ret)
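
# Note (added for clarity): each worker consumes tasks until it reads the
# 'STOP' sentinel that gen_evaluation() enqueues once per thread, the standard
# shutdown pattern for a multiprocessing.Queue worker pool.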
def format_output(result_table, output_file='./log/gen_eval_table.log'):
output_table = []
all_column_name = ['model_name']
for row_name in result_table:
temp_row = [row_name]
for task_name in result_table[row_name]:
for metric in USE_METRIC:
column_name = f"{task_name}_{metric}"
if column_name not in all_column_name:
all_column_name.append(column_name)
temp_row.append('%.2f' % (result_table[row_name][task_name][metric]))
output_table.append(temp_row)
    output_table = sorted(output_table, key=lambda x: x[0])
with open(output_file, 'w') as writer:
print('\t'.join(all_column_name))
writer.write('\t'.join(all_column_name) + '\n')
for row in output_table:
writer.write('\t'.join(row) + '\n')
print('\t'.join(row))
def gen_evaluation(task_name_lst, cluster_method_lst, split_rate_lst, pairing_mode_lst, index_choice_lst):
result_table = {}
task_queue, done_queue, task_n = Queue(), Queue(), 0
for task_name in task_name_lst:
for cluster_method in cluster_method_lst:
for split_rate in split_rate_lst:
for pairing_mod in pairing_mode_lst:
for index_choice in index_choice_lst:
param = {
"task_name": task_name,
"cluster_method": cluster_method,
'split_rate': split_rate,
'pairing_mod': pairing_mod,
'index_choice': index_choice,
}
task_queue.put(param)
task_n += 1
    print(task_n, 'tasks built')
for t in range(N_THREAD):
task_queue.put('STOP')
for t in range(N_THREAD):
Process(target=gen_evaluation_thread, args=(task_queue, done_queue)).start()
print("Start multi-thread Processing")
# collect the results below
for t in range(task_n):
thread_return = done_queue.get()
# print('=== thread return ===', thread_return)
if 'no_file' in thread_return['eval_result']:
print('--- debug ---', (thread_return['eval_result']['no_file']))
pass
else:
model_name = thread_return['cluster_method'] + str(thread_return['split_rate']) + thread_return['pairing_mod'] + thread_return['index_choice']
if model_name not in result_table:
result_table[model_name] = {}
# if thread_return['task_name'] not in result_table[model_name]:
result_table[model_name][thread_return['task_name']] = thread_return['eval_result']
print(t + 1, 'task finished.')
# print(result_table)
format_output(result_table)
if __name__ == "__main__":
gen_evaluation(
task_name_lst=TASK_NAME_LST,
cluster_method_lst=CLUSTER_METHOD_LST,
split_rate_lst=SPLIT_RATE_LST,
pairing_mode_lst=PAIRING_MODE_LST,
index_choice_lst=INDEX_CHOICE_LST
)
| 5,770 | 35.99359 | 177 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/setup.py | #!/usr/bin/env python
from setuptools import setup
setup(name='OpenNMT',
description='A python implementation of OpenNMT',
version='0.1',
packages=['onmt', 'onmt.modules'])
| 193 | 20.555556 | 55 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/opts.py | import argparse
from onmt.modules.SRU import CheckSRU
def model_opts(parser):
"""
These options are passed to the construction of the model.
Be careful with these as they will be used during translation.
"""
# Model options
parser.add_argument('-model_type', default='text',
help="Type of encoder to use. Options are [text|img].")
# Embedding Options
parser.add_argument('-word_vec_size', type=int, default=-1,
help='Word embedding for both.')
parser.add_argument('-src_word_vec_size', type=int, default=500,
help='Src word embedding sizes')
parser.add_argument('-tgt_word_vec_size', type=int, default=500,
help='Tgt word embedding sizes')
parser.add_argument('-feat_merge', type=str, default='concat',
choices=['concat', 'sum', 'mlp'],
help='Merge action for the features embeddings')
parser.add_argument('-feat_vec_size', type=int, default=-1,
help="""If specified, feature embedding sizes
will be set to this. Otherwise, feat_vec_exponent
will be used.""")
parser.add_argument('-feat_vec_exponent', type=float, default=0.7,
help="""If -feat_merge_size is not set, feature
embedding sizes will be set to N^feat_vec_exponent
where N is the number of values the feature takes.""")
parser.add_argument('-position_encoding', action='store_true',
help='Use a sin to mark relative words positions.')
parser.add_argument('-share_decoder_embeddings', action='store_true',
help='Share the word and out embeddings for decoder.')
parser.add_argument('-share_embeddings', action='store_true',
help="""Share the word embeddings between encoder
and decoder.""")
# RNN Options
parser.add_argument('-encoder_type', type=str, default='rnn',
choices=['rnn', 'brnn', 'mean', 'transformer', 'cnn'],
help="""Type of encoder layer to use.""")
parser.add_argument('-decoder_type', type=str, default='rnn',
choices=['rnn', 'transformer', 'cnn'],
help='Type of decoder layer to use.')
parser.add_argument('-layers', type=int, default=-1,
help='Number of layers in enc/dec.')
parser.add_argument('-enc_layers', type=int, default=2,
help='Number of layers in the encoder')
parser.add_argument('-dec_layers', type=int, default=2,
help='Number of layers in the decoder')
parser.add_argument('-cnn_kernel_width', type=int, default=3,
help="""Size of windows in the cnn, the kernel_size is
(cnn_kernel_width, 1) in conv layer""")
parser.add_argument('-rnn_size', type=int, default=500,
help='Size of LSTM hidden states')
parser.add_argument('-input_feed', type=int, default=1,
help="""Feed the context vector at each time step as
additional input (via concatenation with the word
embeddings) to the decoder.""")
parser.add_argument('-rnn_type', type=str, default='LSTM',
choices=['LSTM', 'GRU', 'SRU'],
action=CheckSRU,
help="""The gate type to use in the RNNs""")
# parser.add_argument('-residual', action="store_true",
# help="Add residual connections between RNN layers.")
parser.add_argument('-brnn', action=DeprecateAction,
help="Deprecated, use `encoder_type`.")
parser.add_argument('-brnn_merge', default='concat',
choices=['concat', 'sum'],
help="Merge action for the bidir hidden states")
parser.add_argument('-context_gate', type=str, default=None,
choices=['source', 'target', 'both'],
help="""Type of context gate to use.
Do not select for no context gate.""")
# Attention options
parser.add_argument('-global_attention', type=str, default='general',
choices=['dot', 'general', 'mlp'],
help="""The attention type to use:
dotprot or general (Luong) or MLP (Bahdanau)""")
# Genenerator and loss options.
parser.add_argument('-copy_attn', action="store_true",
help='Train copy attention layer.')
parser.add_argument('-copy_attn_force', action="store_true",
help='When available, train to copy.')
parser.add_argument('-coverage_attn', action="store_true",
help='Train a coverage attention layer.')
parser.add_argument('-lambda_coverage', type=float, default=1,
help='Lambda value for coverage.')
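
# Usage sketch (illustrative): the option groups here are meant to be combined
# on a single parser by the calling script, e.g.
#   parser = argparse.ArgumentParser(description='train.py')
#   model_opts(parser)
#   train_opts(parser)
#   opt = parser.parse_args()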
def preprocess_opts(parser):
# Dictionary Options
parser.add_argument('-src_vocab_size', type=int, default=50000,
help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=50000,
help="Size of the target vocabulary")
parser.add_argument('-src_words_min_frequency', type=int, default=0)
parser.add_argument('-tgt_words_min_frequency', type=int, default=0)
# Truncation options
parser.add_argument('-src_seq_length', type=int, default=50,
help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=50,
help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
help="Truncate target sequence length.")
# Data processing options
parser.add_argument('-shuffle', type=int, default=1,
help="Shuffle data")
parser.add_argument('-lower', action='store_true', help='lowercase data')
# Options most relevant to summarization
parser.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
parser.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
def train_opts(parser):
# Model loading/saving options
parser.add_argument('-data', required=True,
help="""Path prefix to the ".train.pt" and
".valid.pt" file path from preprocess.py""")
parser.add_argument('-save_model', default='model',
help="""Model filename (the model will be saved as
<save_model>_epochN_PPL.pt where PPL is the
validation perplexity""")
parser.add_argument('-train_from', default='', type=str,
help="""If training from a checkpoint then this is the
path to the pretrained model's state_dict.""")
# GPU
parser.add_argument('-gpuid', default=[], nargs='+', type=int,
help="Use CUDA on the listed devices.")
parser.add_argument('-seed', type=int, default=-1,
help="""Random seed used for the experiments
reproducibility.""")
# Init options
parser.add_argument('-start_epoch', type=int, default=1,
help='The epoch from which to start')
parser.add_argument('-param_init', type=float, default=0.1,
help="""Parameters are initialized over uniform distribution
with support (-param_init, param_init).
Use 0 to not use initialization""")
# Pretrained word vectors
parser.add_argument('-pre_word_vecs_enc',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the encoder side.
See README for specific formatting instructions.""")
parser.add_argument('-pre_word_vecs_dec',
help="""If a valid path is specified, then this will load
pretrained word embeddings on the decoder side.
See README for specific formatting instructions.""")
# Fixed word vectors
parser.add_argument('-fix_word_vecs_enc',
action='store_true',
help="Fix word embeddings on the encoder side.")
parser.add_argument('-fix_word_vecs_dec',
action='store_true',
help="Fix word embeddings on the encoder side.")
# Optimization options
parser.add_argument('-batch_size', type=int, default=64,
help='Maximum batch size')
parser.add_argument('-max_generator_batches', type=int, default=32,
help="""Maximum batches of words in a sequence to run
the generator on in parallel. Higher is faster, but
uses more memory.""")
parser.add_argument('-epochs', type=int, default=13,
help='Number of training epochs')
parser.add_argument('-optim', default='sgd',
choices=['sgd', 'adagrad', 'adadelta', 'adam'],
help="""Optimization method.""")
parser.add_argument('-max_grad_norm', type=float, default=5,
help="""If the norm of the gradient vector exceeds this,
renormalize it to have the norm equal to
max_grad_norm""")
parser.add_argument('-dropout', type=float, default=0.3,
help="Dropout probability; applied in LSTM stacks.")
parser.add_argument('-truncated_decoder', type=int, default=0,
help="""Truncated bptt.""")
# learning rate
parser.add_argument('-learning_rate', type=float, default=1.0,
help="""Starting learning rate.
Recommended settings: sgd = 1, adagrad = 0.1,
adadelta = 1, adam = 0.001""")
parser.add_argument('-learning_rate_decay', type=float, default=0.5,
help="""If update_learning_rate, decay learning rate by
this much if (i) perplexity does not decrease on the
validation set or (ii) epoch has gone past
start_decay_at""")
parser.add_argument('-start_decay_at', type=int, default=8,
help="""Start decaying every epoch after and including this
epoch""")
parser.add_argument('-start_checkpoint_at', type=int, default=0,
help="""Start checkpointing every epoch after and including
this epoch""")
parser.add_argument('-decay_method', type=str, default="",
choices=['noam'], help="Use a custom decay rate.")
parser.add_argument('-warmup_steps', type=int, default=4000,
help="""Number of warmup steps for custom decay.""")
parser.add_argument('-report_every', type=int, default=50,
help="Print stats at this interval.")
parser.add_argument('-exp_host', type=str, default="",
help="Send logs to this crayon server.")
parser.add_argument('-exp', type=str, default="",
help="Name of the experiment for logging.")
def translate_opts(parser):
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-src', required=True,
help="""Source sequence to decode (one line per
sequence)""")
parser.add_argument('-src_img_dir', default="",
help='Source image directory')
parser.add_argument('-tgt',
help='True target sequence (optional)')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                        be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5,
help='Beam size')
parser.add_argument('-batch_size', type=int, default=30,
help='Batch size')
parser.add_argument('-max_sent_length', type=int, default=100,
help='Maximum sentence length.')
parser.add_argument('-replace_unk', action="store_true",
help="""Replace the generated UNK tokens with the
source token that had highest attention weight. If
                        phrase_table is provided, it will look up the
                        identified source token and give the corresponding
                        target token. If it is not provided (or the identified
source token does not exist in the table) then it
will copy the source token""")
parser.add_argument('-verbose', action="store_true",
help='Print scores and predictions for each sentence')
parser.add_argument('-attn_debug', action="store_true",
help='Print best attn for each word')
parser.add_argument('-dump_beam', type=str, default="",
help='File to dump beam information to.')
parser.add_argument('-n_best', type=int, default=1,
help="""If verbose is set, will output the n_best
decoded sentences""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
# Options most relevant to summarization.
parser.add_argument('-dynamic_dict', action='store_true',
help="Create dynamic dictionaries")
parser.add_argument('-share_vocab', action='store_true',
help="Share source and target vocabulary")
def add_md_help_argument(parser):
parser.add_argument('-md', action=MarkdownHelpAction,
help='print Markdown-formatted help text and exit.')
# MARKDOWN boilerplate
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class MarkdownHelpFormatter(argparse.HelpFormatter):
"""A really bare-bones argparse help formatter that generates valid markdown.
This will generate something like:
usage
# **section heading**:
## **--argument-one**
```
argument-one help text
```
"""
def _format_usage(self, usage, actions, groups, prefix):
usage_text = super(MarkdownHelpFormatter, self)._format_usage(
usage, actions, groups, prefix)
return '\n```\n%s\n```\n\n' % usage_text
def format_help(self):
self._root_section.heading = '# %s' % self._prog
return super(MarkdownHelpFormatter, self).format_help()
def start_section(self, heading):
super(MarkdownHelpFormatter, self).start_section('## **%s**' % heading)
def _format_action(self, action):
lines = []
action_header = self._format_action_invocation(action)
lines.append('### **%s** ' % action_header)
if action.help:
lines.append('')
lines.append('```')
help_text = self._expand_help(action)
lines.extend(self._split_lines(help_text, 80))
lines.append('```')
lines.extend(['', ''])
return '\n'.join(lines)
class MarkdownHelpAction(argparse.Action):
def __init__(self, option_strings,
dest=argparse.SUPPRESS, default=argparse.SUPPRESS,
**kwargs):
super(MarkdownHelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
parser.formatter_class = MarkdownHelpFormatter
parser.print_help()
parser.exit()
class DeprecateAction(argparse.Action):
def __init__(self, option_strings, dest, help=None, **kwargs):
super(DeprecateAction, self).__init__(option_strings, dest, nargs=0,
help=help, **kwargs)
def __call__(self, parser, namespace, values, flag_name):
help = self.help if self.help is not None else ""
msg = "Flag '%s' is deprecated. %s" % (flag_name, help)
raise argparse.ArgumentTypeError(msg)
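# Usage sketch (assumed; mirrors how train.py / translate.py in this repo
# wire these option groups together):
#   parser = argparse.ArgumentParser(description='train.py')
#   add_md_help_argument(parser)
#   model_opts(parser)
#   train_opts(parser)
#   opt = parser.parse_args()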
| 17,059 | 48.449275 | 84 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/translate.py | #!/usr/bin/env python
from __future__ import division
from builtins import bytes
import os
import argparse
import math
import codecs
import torch
import onmt
import onmt.IO
import opts
from itertools import takewhile, count
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
parser = argparse.ArgumentParser(
description='translate.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
opts.translate_opts(parser)
opt = parser.parse_args()
if opt.batch_size != 1:
print("WARNING: -batch_size isn't supported currently, "
"we set it to 1 for now!")
opt.batch_size = 1
def report_score(name, score_total, words_total):
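    # score_total is a sum of per-word log-likelihoods, so perplexity is
    # the exp of the negative average log-likelihood per word.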
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / words_total,
name, math.exp(-score_total/words_total)))
def get_src_words(src_indices, index2str):
    raw_words = (index2str[i] for i in src_indices)
    words = takewhile(lambda w: w != onmt.IO.PAD_WORD, raw_words)
return " ".join(words)
def main():
dummy_parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(dummy_parser)
dummy_opt = dummy_parser.parse_known_args([])[0]
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
translator = onmt.Translator(opt, dummy_opt.__dict__)
out_file = codecs.open(opt.output, 'w', 'utf-8')
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
if opt.dump_beam != "":
import json
translator.initBeamAccum()
data = onmt.IO.ONMTDataset(
opt.src, opt.tgt, translator.fields,
use_filter_pred=False)
test_data = onmt.IO.OrderedIterator(
dataset=data, device=opt.gpu,
batch_size=opt.batch_size, train=False, sort=False,
shuffle=False)
counter = count(1)
for batch in test_data:
pred_batch, gold_batch, pred_scores, gold_scores, attn, src \
= translator.translate(batch, data)
pred_score_total += sum(score[0] for score in pred_scores)
pred_words_total += sum(len(x[0]) for x in pred_batch)
if opt.tgt:
gold_score_total += sum(gold_scores)
gold_words_total += sum(len(x) for x in batch.tgt[1:])
# z_batch: an iterator over the predictions, their scores,
# the gold sentence, its score, and the source sentence for each
# sentence in the batch. It has to be zip_longest instead of
# plain-old zip because the gold_batch has length 0 if the target
# is not included.
z_batch = zip_longest(
pred_batch, gold_batch,
pred_scores, gold_scores,
(sent.squeeze(1) for sent in src.split(1, dim=1)))
for pred_sents, gold_sent, pred_score, gold_score, src_sent in z_batch:
n_best_preds = [" ".join(pred) for pred in pred_sents[:opt.n_best]]
out_file.write('\n'.join(n_best_preds))
out_file.write('\n')
out_file.flush()
if opt.verbose:
sent_number = next(counter)
words = get_src_words(
src_sent, translator.fields["src"].vocab.itos)
os.write(1, bytes('\nSENT %d: %s\n' %
(sent_number, words), 'UTF-8'))
best_pred = n_best_preds[0]
best_score = pred_score[0]
os.write(1, bytes('PRED %d: %s\n' %
(sent_number, best_pred), 'UTF-8'))
print("PRED SCORE: %.4f" % best_score)
if opt.tgt:
tgt_sent = ' '.join(gold_sent)
os.write(1, bytes('GOLD %d: %s\n' %
(sent_number, tgt_sent), 'UTF-8'))
print("GOLD SCORE: %.4f" % gold_score)
if len(n_best_preds) > 1:
print('\nBEST HYP:')
for score, sent in zip(pred_score, n_best_preds):
os.write(1, bytes("[%.4f] %s\n" % (score, sent),
'UTF-8'))
report_score('PRED', pred_score_total, pred_words_total)
if opt.tgt:
report_score('GOLD', gold_score_total, gold_words_total)
if opt.dump_beam:
json.dump(translator.beam_accum,
codecs.open(opt.dump_beam, 'w', 'utf-8'))
if __name__ == "__main__":
main()
| 4,516 | 32.708955 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/train.py | #!/usr/bin/env python
from __future__ import division
import os
import sys
import argparse
import torch
import torch.nn as nn
from torch import cuda
import onmt
import onmt.Models
import onmt.ModelConstructor
import onmt.modules
from onmt.Utils import aeq, use_gpu
import opts
parser = argparse.ArgumentParser(
description='train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# opts.py
opts.add_md_help_argument(parser)
opts.model_opts(parser)
opts.train_opts(parser)
opt = parser.parse_args()
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
opt.brnn = (opt.encoder_type == "brnn")
if opt.seed > 0:
torch.manual_seed(opt.seed)
if opt.rnn_type == "SRU" and not opt.gpuid:
raise AssertionError("Using SRU requires -gpuid set.")
if torch.cuda.is_available() and not opt.gpuid:
print("WARNING: You have a CUDA device, should run with -gpuid 0")
if opt.gpuid:
cuda.set_device(opt.gpuid[0])
if opt.seed > 0:
torch.cuda.manual_seed(opt.seed)
if len(opt.gpuid) > 1:
sys.stderr.write("Sorry, multigpu isn't supported yet, coming soon!\n")
sys.exit(1)
# Set up the Crayon logging server.
if opt.exp_host != "":
from pycrayon import CrayonClient
cc = CrayonClient(hostname=opt.exp_host)
experiments = cc.get_experiment_names()
print(experiments)
if opt.exp in experiments:
cc.remove_experiment(opt.exp)
experiment = cc.create_experiment(opt.exp)
def report_func(epoch, batch, num_batches,
start_time, lr, report_stats):
"""
    This is the user-defined batch-level training progress
report function.
Args:
epoch(int): current epoch count.
batch(int): current batch count.
num_batches(int): total number of batches.
start_time(float): last report time.
lr(float): current learning rate.
report_stats(Statistics): old Statistics instance.
Returns:
report_stats(Statistics): updated Statistics instance.
"""
if batch % opt.report_every == -1 % opt.report_every:
report_stats.output(epoch, batch+1, num_batches, start_time)
if opt.exp_host:
report_stats.log("progress", experiment, lr)
report_stats = onmt.Statistics()
return report_stats
def make_train_data_iter(train_data, opt):
"""
This returns user-defined train data iterator for the trainer
    to iterate over during each train epoch. We implement a simple
    ordered iterator strategy here, but a more sophisticated strategy
    like curriculum learning is ok too.
"""
return onmt.IO.OrderedIterator(
dataset=train_data, batch_size=opt.batch_size,
device=opt.gpuid[0] if opt.gpuid else -1,
repeat=False)
def make_valid_data_iter(valid_data, opt):
"""
This returns user-defined validate data iterator for the trainer
    to iterate over during each validate epoch. We implement a simple
    ordered iterator strategy here, but a more sophisticated strategy
    is ok too.
"""
return onmt.IO.OrderedIterator(
dataset=valid_data, batch_size=opt.batch_size,
device=opt.gpuid[0] if opt.gpuid else -1,
train=False, sort=True)
def make_loss_compute(model, tgt_vocab, dataset, opt):
"""
This returns user-defined LossCompute object, which is used to
compute loss in train/validate process. You can implement your
    own *LossCompute class by subclassing LossComputeBase.
"""
if opt.copy_attn:
compute = onmt.modules.CopyGeneratorLossCompute(
model.generator, tgt_vocab, dataset, opt.copy_attn_force)
else:
compute = onmt.Loss.NMTLossCompute(model.generator, tgt_vocab)
if use_gpu(opt):
compute.cuda()
return compute
def train_model(model, train_data, valid_data, fields, optim):
min_ppl, max_accuracy = float('inf'), -1
train_iter = make_train_data_iter(train_data, opt)
valid_iter = make_valid_data_iter(valid_data, opt)
train_loss = make_loss_compute(model, fields["tgt"].vocab,
train_data, opt)
valid_loss = make_loss_compute(model, fields["tgt"].vocab,
valid_data, opt)
trunc_size = opt.truncated_decoder # Badly named...
shard_size = opt.max_generator_batches
trainer = onmt.Trainer(model, train_iter, valid_iter,
train_loss, valid_loss, optim,
trunc_size, shard_size)
for epoch in range(opt.start_epoch, opt.epochs + 1):
print('')
# 1. Train for one epoch on the training set.
train_stats = trainer.train(epoch, report_func)
print('Train perplexity: %g' % train_stats.ppl())
print('Train accuracy: %g' % train_stats.accuracy())
# 2. Validate on the validation set.
valid_stats = trainer.validate()
print('Validation perplexity: %g' % valid_stats.ppl())
print('Validation accuracy: %g' % valid_stats.accuracy())
# 3. Log to remote server.
if opt.exp_host:
train_stats.log("train", experiment, optim.lr)
valid_stats.log("valid", experiment, optim.lr)
# 4. Update the learning rate
trainer.epoch_step(valid_stats.ppl(), epoch)
# 5. Drop a checkpoint if needed.
if epoch >= opt.start_checkpoint_at:
if valid_stats.accuracy() > max_accuracy:
                # 5.1 drop checkpoint when a higher accuracy is achieved.
min_ppl = min(valid_stats.ppl(), min_ppl)
max_accuracy = max(valid_stats.accuracy(), max_accuracy)
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
                print('Save model according to highest-ever accuracy: acc: {0}, ppl: {1}'.format(max_accuracy, min_ppl))
elif valid_stats.ppl() < min_ppl:
            # 5.2 drop checkpoint when a lower ppl is achieved.
min_ppl = min(valid_stats.ppl(), min_ppl)
max_accuracy = max(valid_stats.accuracy(), max_accuracy)
trainer.drop_checkpoint(opt, epoch, fields, valid_stats)
print('Save model according to lowest-ever ppl: acc: {0}, ppl: {1}'.format(max_accuracy, min_ppl))
def check_save_model_path():
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
print('* number of parameters: %d' % n_params)
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
print('encoder: ', enc)
print('decoder: ', dec)
def load_fields(train, valid, checkpoint):
fields = onmt.IO.load_fields(
torch.load(opt.data + '.vocab.pt'))
fields = dict([(k, f) for (k, f) in fields.items()
if k in train.examples[0].__dict__])
train.fields = fields
valid.fields = fields
if opt.train_from:
print('Loading vocab from checkpoint at %s.' % opt.train_from)
fields = onmt.IO.load_fields(checkpoint['vocab'])
print(' * vocabulary size. source = %d; target = %d' %
(len(fields['src'].vocab), len(fields['tgt'].vocab)))
return fields
def collect_features(train, fields):
# TODO: account for target features.
# Also, why does fields need to have the structure it does?
src_features = onmt.IO.collect_features(fields)
aeq(len(src_features), train.n_src_feats)
return src_features
def build_model(model_opt, opt, fields, checkpoint):
print('Building model...')
model = onmt.ModelConstructor.make_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
if len(opt.gpuid) > 1:
print('Multi gpu training: ', opt.gpuid)
model = nn.DataParallel(model, device_ids=opt.gpuid, dim=1)
print(model)
return model
def build_optim(model, checkpoint):
if opt.train_from:
print('Loading optimizer from checkpoint.')
optim = checkpoint['optim']
optim.optimizer.load_state_dict(
checkpoint['optim'].optimizer.state_dict())
else:
# what members of opt does Optim need?
optim = onmt.Optim(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_at=opt.start_decay_at,
opt=opt
)
optim.set_parameters(model.parameters())
return optim
def main():
# Load train and validate data.
print("Loading train and validate data from '%s'" % opt.data)
train = torch.load(opt.data + '.train.pt')
valid = torch.load(opt.data + '.valid.pt')
print(' * number of training sentences: %d' % len(train))
print(' * maximum batch size: %d' % opt.batch_size)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
print('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = checkpoint['opt']
# I don't like reassigning attributes of opt: it's not clear
opt.start_epoch = checkpoint['epoch'] + 1
else:
checkpoint = None
model_opt = opt
# Load fields generated from preprocess phase.
fields = load_fields(train, valid, checkpoint)
# Collect features.
src_features = collect_features(train, fields)
for j, feat in enumerate(src_features):
print(' * src feature %d size = %d' % (j, len(fields[feat].vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
tally_parameters(model)
check_save_model_path()
# Build optimizer.
optim = build_optim(model, checkpoint)
# Do training.
train_model(model, train, valid, fields, optim)
if __name__ == "__main__":
main()
| 10,352 | 31.556604 | 120 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/preprocess.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import codecs
import torch
import onmt
import onmt.IO
import opts
parser = argparse.ArgumentParser(
description='preprocess.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
opts.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-data_type', default="text",
help="Type of the source input. Options are [text|img].")
parser.add_argument('-data_img_dir', default=".",
help="Location of source images")
parser.add_argument('-train_src', required=True,
help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
help="Path to the training target data")
parser.add_argument('-valid_src', required=True,
help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
help="Path to the validation target data")
parser.add_argument('-save_data', required=True,
help="Output file for the prepared data")
parser.add_argument('-src_vocab',
help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
help="Path to an existing target vocabulary")
parser.add_argument('-features_vocabs_prefix', type=str, default='',
help="Path prefix to existing features vocabularies")
parser.add_argument('-seed', type=int, default=3435,
help="Random seed")
parser.add_argument('-report_every', type=int, default=100000,
help="Report status every this many sentences")
opts.preprocess_opts(parser)
opt = parser.parse_args()
torch.manual_seed(opt.seed)
def main():
print('Preparing training ...')
with codecs.open(opt.train_src, "r", "utf-8") as src_file:
src_line = src_file.readline().strip().split()
_, _, n_src_features = onmt.IO.extract_features(src_line)
with codecs.open(opt.train_tgt, "r", "utf-8") as tgt_file:
tgt_line = tgt_file.readline().strip().split()
_, _, n_tgt_features = onmt.IO.extract_features(tgt_line)
fields = onmt.IO.get_fields(n_src_features, n_tgt_features)
print("Building Training...")
train = onmt.IO.ONMTDataset(
opt.train_src, opt.train_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
print("Building Vocab...")
onmt.IO.build_vocab(train, opt)
print("Building Valid...")
valid = onmt.IO.ONMTDataset(
opt.valid_src, opt.valid_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
print("Saving train/valid/fields")
# Can't save fields, so remove/reconstruct at training time.
torch.save(onmt.IO.save_vocab(fields),
open(opt.save_data + '.vocab.pt', 'wb'))
train.fields = []
valid.fields = []
torch.save(train, open(opt.save_data + '.train.pt', 'wb'))
torch.save(valid, open(opt.save_data + '.valid.pt', 'wb'))
if __name__ == "__main__":
main()
| 3,411 | 34.915789 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/tools/extract_embeddings.py | from __future__ import division
import torch
import argparse
import codecs
from onmt.ModelConstructor import make_embeddings, \
make_encoder, make_decoder
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model .pt file')
parser.add_argument('-output_dir', default='.',
help="""Path to output the embeddings""")
parser.add_argument('-gpu', type=int, default=-1,
help="Device to run on")
def write_embeddings(filename, word_dict, embeddings):
    # Write one token per line: the token followed by its embedding values.
    with codecs.open(filename, 'w', 'utf-8') as file:
        for i in range(len(embeddings)):
            # Avoid shadowing the built-in `str` and mixing bytes with text.
            line = word_dict.idxToLabel[i]
            for j in range(len(embeddings[0])):
                line = line + " %5f" % (embeddings[i][j])
            file.write(line + "\n")
def main():
opt = parser.parse_args()
checkpoint = torch.load(opt.model)
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
model_opt = checkpoint['opt']
src_dict = checkpoint['dicts']['src']
tgt_dict = checkpoint['dicts']['tgt']
feature_dicts = []
embeddings = make_embeddings(model_opt, src_dict, feature_dicts)
encoder = make_encoder(model_opt, embeddings)
embeddings = make_embeddings(model_opt, tgt_dict, feature_dicts,
for_encoder=False)
decoder = make_decoder(model_opt, embeddings)
encoder_embeddings = encoder.word_lut.weight.data.tolist()
decoder_embeddings = decoder.word_lut.weight.data.tolist()
print("Writing source embeddings")
write_embeddings(opt.output_dir + "/src_embeddings.txt", src_dict,
encoder_embeddings)
print("Writing target embeddings")
write_embeddings(opt.output_dir + "/tgt_embeddings.txt", tgt_dict,
decoder_embeddings)
print('... done.')
if __name__ == "__main__":
main()
| 1,987 | 30.0625 | 70 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_simple.py | import onmt
def test_load():
onmt
pass
| 49 | 6.142857 | 16 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_models.py | import argparse
import copy
import unittest
import torch
from torch.autograd import Variable
import onmt
import opts
from onmt.ModelConstructor import make_embeddings, \
make_encoder, make_decoder
parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(parser)
opts.train_opts(parser)
# -data option is required, but not used in this test, so dummy.
opt = parser.parse_known_args(['-data', 'dummy'])[0]
print(opt)
class TestModel(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestModel, self).__init__(*args, **kwargs)
self.opt = opt
# Helper to generate a vocabulary
def get_vocab(self):
src = onmt.IO.get_fields(0, 0)["src"]
src.build_vocab([])
return src.vocab
def get_batch(self, sourceL=3, bsize=1):
# len x batch x nfeat
test_src = Variable(torch.ones(sourceL, bsize, 1)).long()
test_tgt = Variable(torch.ones(sourceL, bsize, 1)).long()
test_length = torch.ones(bsize).fill_(sourceL)
return test_src, test_tgt, test_length
def embeddings_forward(self, opt, sourceL=3, bsize=1):
'''
        Tests if the embeddings work as expected
args:
opt: set of options
sourceL: Length of generated input sentence
bsize: Batchsize of generated input
'''
word_dict = self.get_vocab()
feature_dicts = []
emb = make_embeddings(opt, word_dict, feature_dicts)
test_src, _, __ = self.get_batch(sourceL=sourceL,
bsize=bsize)
if opt.decoder_type == 'transformer':
input = torch.cat([test_src, test_src], 0)
res = emb(input)
compare_to = torch.zeros(sourceL * 2, bsize, opt.src_word_vec_size)
else:
res = emb(test_src)
compare_to = torch.zeros(sourceL, bsize, opt.src_word_vec_size)
self.assertEqual(res.size(), compare_to.size())
def encoder_forward(self, opt, sourceL=3, bsize=1):
'''
Tests if the encoder works as expected
args:
opt: set of options
sourceL: Length of generated input sentence
bsize: Batchsize of generated input
'''
word_dict = self.get_vocab()
feature_dicts = []
embeddings = make_embeddings(opt, word_dict, feature_dicts)
enc = make_encoder(opt, embeddings)
test_src, test_tgt, test_length = self.get_batch(sourceL=sourceL,
bsize=bsize)
hidden_t, outputs = enc(test_src, test_length)
# Initialize vectors to compare size with
test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.rnn_size)
test_out = torch.zeros(sourceL, bsize, opt.rnn_size)
# Ensure correct sizes and types
self.assertEqual(test_hid.size(),
hidden_t[0].size(),
hidden_t[1].size())
self.assertEqual(test_out.size(), outputs.size())
self.assertEqual(type(outputs), torch.autograd.Variable)
self.assertEqual(type(outputs.data), torch.FloatTensor)
def ntmmodel_forward(self, opt, sourceL=3, bsize=1):
"""
        Creates an NMT model with a custom opt function.
        Forwards a test batch and checks output size.
Args:
opt: Namespace with options
sourceL: length of input sequence
bsize: batchsize
"""
word_dict = self.get_vocab()
feature_dicts = []
embeddings = make_embeddings(opt, word_dict, feature_dicts)
enc = make_encoder(opt, embeddings)
embeddings = make_embeddings(opt, word_dict, feature_dicts,
for_encoder=False)
dec = make_decoder(opt, embeddings)
model = onmt.Models.NMTModel(enc, dec)
test_src, test_tgt, test_length = self.get_batch(sourceL=sourceL,
bsize=bsize)
outputs, attn, _ = model(test_src,
test_tgt,
test_length)
outputsize = torch.zeros(sourceL - 1, bsize, opt.rnn_size)
# Make sure that output has the correct size and type
self.assertEqual(outputs.size(), outputsize.size())
self.assertEqual(type(outputs), torch.autograd.Variable)
self.assertEqual(type(outputs.data), torch.FloatTensor)
def _add_test(paramSetting, methodname):
"""
Adds a Test to TestModel according to settings
Args:
paramSetting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
if paramSetting:
opt = copy.deepcopy(self.opt)
for param, setting in paramSetting:
setattr(opt, param, setting)
else:
opt = self.opt
getattr(self, methodname)(opt)
if paramSetting:
name = 'test_' + methodname + "_" + "_".join(str(paramSetting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestModel, name, test_method)
test_method.__name__ = name
'''
TEST PARAMETERS
'''
test_embeddings = [[],
[('decoder_type', 'transformer')]
]
for p in test_embeddings:
_add_test(p, 'embeddings_forward')
tests_encoder = [[],
[('encoder_type', 'mean')],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16), ('rnn_size', 16)],
[]
]
for p in tests_encoder:
_add_test(p, 'encoder_forward')
tests_ntmodel = [[('rnn_type', 'GRU')],
[('layers', 10)],
[('input_feed', 0)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16)],
# [('encoder_type', 'transformer'),
# ('word_vec_size', 16),
# ('rnn_size', 16)],
[('decoder_type', 'transformer'),
('encoder_type', 'transformer'),
('src_word_vec_size', 16),
('tgt_word_vec_size', 16),
('rnn_size', 16),
('position_encoding', True)],
[('coverage_attn', True)],
[('copy_attn', True)],
[('global_attention', 'mlp')],
[('context_gate', 'both')],
[('context_gate', 'target')],
[('context_gate', 'source')],
[('encoder_type', "brnn"),
('brnn_merge', 'sum')],
[('encoder_type', "brnn")],
[('decoder_type', 'cnn'),
('encoder_type', 'cnn')],
[]
]
if onmt.modules.check_sru_requirement():
""" Only do SRU test if requirment is safisfied. """
# SRU doesn't support input_feed.
tests_ntmodel.append([('rnn_type', 'SRU'), ('input_feed', 0)])
for p in tests_ntmodel:
_add_test(p, 'ntmmodel_forward')
| 7,275 | 32.84186 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_preprocess.py | import argparse
import copy
import unittest
import onmt
import opts
import torchtext
from collections import Counter
parser = argparse.ArgumentParser(description='preprocess.py')
opts.preprocess_opts(parser)
opt = parser.parse_known_args()[0]
opt.train_src = 'data/src-train.txt'
opt.train_tgt = 'data/tgt-train.txt'
opt.valid_src = 'data/src-val.txt'
opt.valid_tgt = 'data/tgt-val.txt'
print(opt)
class TestData(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestData, self).__init__(*args, **kwargs)
self.opt = opt
def dataset_build(self, opt):
fields = onmt.IO.get_fields(0, 0)
train = onmt.IO.ONMTDataset(
opt.train_src, opt.train_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
onmt.IO.build_vocab(train, opt)
onmt.IO.ONMTDataset(
opt.valid_src, opt.valid_tgt, fields,
opt.src_seq_length, opt.tgt_seq_length,
src_seq_length_trunc=opt.src_seq_length_trunc,
tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
dynamic_dict=opt.dynamic_dict)
def test_merge_vocab(self):
va = torchtext.vocab.Vocab(Counter('abbccc'))
vb = torchtext.vocab.Vocab(Counter('eeabbcccf'))
merged = onmt.IO.merge_vocabs([va, vb], 2)
self.assertEqual(Counter({'c': 6, 'b': 4, 'a': 2, 'e': 2, 'f': 1}),
merged.freqs)
self.assertEqual(6, len(merged.itos))
self.assertTrue('b' in merged.itos)
def _add_test(paramSetting, methodname):
"""
Adds a Test to TestData according to settings
Args:
paramSetting: list of tuples of (param, setting)
methodname: name of the method that gets called
"""
def test_method(self):
if paramSetting:
opt = copy.deepcopy(self.opt)
for param, setting in paramSetting:
setattr(opt, param, setting)
else:
opt = self.opt
getattr(self, methodname)(opt)
if paramSetting:
name = 'test_' + methodname + "_" + "_".join(str(paramSetting).split())
else:
name = 'test_' + methodname + '_standard'
setattr(TestData, name, test_method)
test_method.__name__ = name
test_databuild = [[],
[('src_vocab_size', 1),
('tgt_vocab_size', 1)],
[('src_vocab_size', 10000),
('tgt_vocab_size', 10000)],
[('src_seq_length', 1)],
[('src_seq_length', 5000)],
[('src_seq_length_trunc', 1)],
[('src_seq_length_trunc', 5000)],
[('tgt_seq_length', 1)],
[('tgt_seq_length', 5000)],
[('tgt_seq_length_trunc', 1)],
[('tgt_seq_length_trunc', 5000)],
[('shuffle', 0)],
[('lower', True)],
[('dynamic_dict', True)],
[('share_vocab', True)],
[('dynamic_dict', True),
('share_vocab', True)],
]
for p in test_databuild:
_add_test(p, 'dataset_build')
| 3,317 | 30.009346 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/test/test_attention.py | """
Here come the tests for attention types and their compatibility
"""
| 72 | 17.25 | 63 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Loss.py | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
import onmt
class LossComputeBase(nn.Module):
"""
This is the loss criterion base class. Users can implement their own
    loss computation strategy by making a subclass of this one.
    Users need to implement the compute_loss() and make_shard_state() methods.
    We inherit from nn.Module to leverage the cuda behavior.
"""
def __init__(self, generator, tgt_vocab):
super(LossComputeBase, self).__init__()
self.generator = generator
self.tgt_vocab = tgt_vocab
self.padding_idx = tgt_vocab.stoi[onmt.IO.PAD_WORD]
def make_shard_state(self, batch, output, range_, attns=None):
"""
Make shard state dictionary for shards() to return iterable
shards for efficient loss computation. Subclass must define
this method to match its own compute_loss() interface.
Args:
batch: the current batch.
output: the predict output from the model.
range_: the range of examples for computing, the whole
batch or a trunc of it?
attns: the attns dictionary returned from the model.
"""
return NotImplementedError
def compute_loss(self, batch, output, target, **kwargs):
"""
Compute the loss. Subclass must define this method.
Args:
batch: the current batch.
output: the predict output from the model.
target: the validate target to compare output with.
**kwargs(optional): additional info for computing loss.
"""
return NotImplementedError
def monolithic_compute_loss(self, batch, output, attns):
"""
Compute the loss monolithically, not dividing into shards.
"""
range_ = (0, batch.tgt.size(0))
shard_state = self.make_shard_state(batch, output, range_, attns)
_, batch_stats = self.compute_loss(batch, **shard_state)
return batch_stats
def sharded_compute_loss(self, batch, output, attns,
cur_trunc, trunc_size, shard_size):
"""
Compute the loss in shards for efficiency.
"""
batch_stats = onmt.Statistics()
range_ = (cur_trunc, cur_trunc + trunc_size)
shard_state = self.make_shard_state(batch, output, range_, attns)
for shard in shards(shard_state, shard_size):
loss, stats = self.compute_loss(batch, **shard)
loss.div(batch.batch_size).backward()
batch_stats.update(stats)
return batch_stats
def stats(self, loss, scores, target):
"""
Compute and return a Statistics object.
Args:
loss(Tensor): the loss computed by the loss criterion.
            scores(Tensor): the predicted output scores.
            target(Tensor): the true target indices, used to mask padding.
        """
pred = scores.max(1)[1]
non_padding = target.ne(self.padding_idx)
num_correct = pred.eq(target) \
.masked_select(non_padding) \
.sum()
return onmt.Statistics(loss[0], non_padding.sum(), num_correct)
def bottle(self, v):
return v.view(-1, v.size(2))
def unbottle(self, v, batch_size):
return v.view(-1, batch_size, v.size(1))
class NMTLossCompute(LossComputeBase):
"""
Standard NMT Loss Computation.
"""
def __init__(self, generator, tgt_vocab):
super(NMTLossCompute, self).__init__(generator, tgt_vocab)
weight = torch.ones(len(tgt_vocab))
weight[self.padding_idx] = 0
self.criterion = nn.NLLLoss(weight, size_average=False)
def make_shard_state(self, batch, output, range_, attns=None):
""" See base class for args description. """
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
}
def compute_loss(self, batch, output, target):
""" See base class for args description. """
scores = self.generator(self.bottle(output))
target = target.view(-1)
loss = self.criterion(scores, target)
loss_data = loss.data.clone()
stats = self.stats(loss_data, scores.data, target.data)
return loss, stats
def filter_shard_state(state):
for k, v in state.items():
if v is not None:
if isinstance(v, Variable) and v.requires_grad:
v = Variable(v.data, requires_grad=True, volatile=False)
yield k, v
def shards(state, shard_size, eval=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute.make_shard_state(). The values for
those keys are Tensor-like or None.
        shard_size: The maximum size of the shards yielded by this
                    generator.
eval: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval:
yield state
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state))
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, torch.split(v, shard_size))
for k, v in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = ((state[k], v.grad.data) for k, v in non_none.items()
if isinstance(v, Variable) and v.grad is not None)
inputs, grads = zip(*variables)
torch.autograd.backward(inputs, grads)
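# Usage sketch (assumed), mirroring sharded_compute_loss() above:
#   for shard in shards(shard_state, shard_size):
#       loss, stats = compute_loss(batch, **shard)
#       loss.div(batch.batch_size).backward()
#   # After the generator is exhausted, shards() itself back-propagates the
#   # gradients accumulated on the re-wrapped (requires_grad) state tensors
#   # from filter_shard_state() into the original graph.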
| 6,611 | 34.548387 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Beam.py | from __future__ import division
import torch
import onmt
"""
Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
"""
class Beam(object):
def __init__(self, size, n_best=1, cuda=False, vocab=None,
global_scorer=None):
self.size = size
self.tt = torch.cuda if cuda else torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.allScores = []
# The backpointers at each time-step.
self.prevKs = []
# The outputs at each time-step.
self.nextYs = [self.tt.LongTensor(size)
.fill_(vocab.stoi[onmt.IO.PAD_WORD])]
self.nextYs[0][0] = vocab.stoi[onmt.IO.BOS_WORD]
self.vocab = vocab
# Has EOS topped the beam yet.
self._eos = self.vocab.stoi[onmt.IO.EOS_WORD]
self.eosTop = False
# The attentions (matrix) for each time.
self.attn = []
# Time and k pair for finished.
self.finished = []
self.n_best = n_best
# Information for global scoring.
self.globalScorer = global_scorer
self.globalState = {}
def getCurrentState(self):
"Get the outputs for the current timestep."
return self.nextYs[-1]
def getCurrentOrigin(self):
"Get the backpointers for the current timestep."
return self.prevKs[-1]
def advance(self, wordLk, attnOut):
"""
        Given the word probabilities `wordLk` over every beam's last step
        and attention `attnOut`: compute and update the beam search.
Parameters:
* `wordLk`- probs of advancing from the last step (K x words)
* `attnOut`- attention at the last step
Returns: True if beam search is complete.
"""
numWords = wordLk.size(1)
# Sum the previous scores.
if len(self.prevKs) > 0:
beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
# Don't let EOS have children.
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
beamLk[i] = -1e20
else:
beamLk = wordLk[0]
flatBeamLk = beamLk.view(-1)
bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
self.allScores.append(self.scores)
self.scores = bestScores
# bestScoresId is flattened beam x word array, so calculate which
# word and beam each score came from
prevK = bestScoresId / numWords
self.prevKs.append(prevK)
self.nextYs.append((bestScoresId - prevK * numWords))
self.attn.append(attnOut.index_select(0, prevK))
if self.globalScorer is not None:
self.globalScorer.updateGlobalState(self)
for i in range(self.nextYs[-1].size(0)):
if self.nextYs[-1][i] == self._eos:
s = self.scores[i]
if self.globalScorer is not None:
globalScores = self.globalScorer.score(self, self.scores)
s = globalScores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
# End condition is when top-of-beam is EOS and no global score.
if self.nextYs[-1][0] == self.vocab.stoi[onmt.IO.EOS_WORD]:
# self.allScores.append(self.scores)
self.eosTop = True
def done(self):
return self.eosTop and len(self.finished) >= self.n_best
def sortFinished(self, minimum=None):
if minimum is not None:
i = 0
# Add from beam until we have minimum outputs.
while len(self.finished) < minimum:
s = self.scores[i]
if self.globalScorer is not None:
globalScores = self.globalScorer.score(self, self.scores)
s = globalScores[i]
self.finished.append((s, len(self.nextYs) - 1, i))
self.finished.sort(key=lambda a: -a[0])
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def getHyp(self, timestep, k):
"""
Walk back to construct the full hypothesis.
"""
hyp, attn = [], []
for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
hyp.append(self.nextYs[j+1][k])
attn.append(self.attn[j][k])
k = self.prevKs[j][k]
return hyp[::-1], torch.stack(attn[::-1])
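# Usage sketch (assumed; see Translator.translateBatch() in onmt/Translator.py):
#   beam = Beam(beam_size, n_best=1, cuda=False, vocab=tgt_vocab)
#   while not beam.done():
#       # `decoder_step` is a hypothetical stand-in for one decoder forward.
#       word_log_probs, attn_out = decoder_step(beam.getCurrentState())
#       beam.advance(word_log_probs, attn_out)
#   scores, ks = beam.sortFinished(minimum=1)
#   hyp, attn = beam.getHyp(*ks[0])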
class GNMTGlobalScorer(object):
"""
Google NMT ranking score from Wu et al.
"""
def __init__(self, alpha, beta):
self.alpha = alpha
self.beta = beta
def score(self, beam, logprobs):
"Additional term add to log probability"
cov = beam.globalState["coverage"]
pen = self.beta * torch.min(cov, cov.clone().fill_(1.0)).log().sum(1)
l_term = (((5 + len(beam.nextYs)) ** self.alpha) /
((5 + 1) ** self.alpha))
return (logprobs / l_term) + pen
def updateGlobalState(self, beam):
"Keeps the coverage vector as sum of attens"
if len(beam.prevKs) == 1:
beam.globalState["coverage"] = beam.attn[-1]
else:
beam.globalState["coverage"] = beam.globalState["coverage"] \
.index_select(0, beam.prevKs[-1]).add(beam.attn[-1])
| 5,428 | 32.512346 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Translator.py | import torch
from torch.autograd import Variable
import onmt
import onmt.Models
import onmt.ModelConstructor
import onmt.modules
import onmt.IO
from onmt.Utils import use_gpu
NOISE_TRANSELATE = False
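# NOISE_TRANSELATE toggles noisy decoding: when True, translateBatch()
# perturbs the encoder final states with uniform noise before decoding,
# presumably to sample more diverse outputs for data augmentation.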
class Translator(object):
def __init__(self, opt, dummy_opt={}):
# Add in default model arguments, possibly added since training.
self.opt = opt
checkpoint = torch.load(opt.model,
map_location=lambda storage, loc: storage)
self.fields = onmt.IO.load_fields(checkpoint['vocab'])
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
self._type = model_opt.encoder_type
self.copy_attn = model_opt.copy_attn
self.model = onmt.ModelConstructor.make_base_model(
model_opt, self.fields, use_gpu(opt), checkpoint)
self.model.eval()
self.model.generator.eval()
# for debugging
self.beam_accum = None
def initBeamAccum(self):
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def buildTargetTokens(self, pred, src, attn, copy_vocab):
vocab = self.fields["tgt"].vocab
tokens = []
for tok in pred:
if tok < len(vocab):
tokens.append(vocab.itos[tok])
else:
tokens.append(copy_vocab.itos[tok - len(vocab)])
if tokens[-1] == onmt.IO.EOS_WORD:
tokens = tokens[:-1]
break
if self.opt.replace_unk and attn is not None:
for i in range(len(tokens)):
if tokens[i] == vocab.itos[onmt.IO.UNK]:
_, maxIndex = attn[i].max(0)
tokens[i] = self.fields["src"].vocab.itos[src[maxIndex[0]]]
return tokens
def _runTarget(self, batch, data):
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt_in = onmt.IO.make_features(batch, 'tgt')[:-1]
# (1) run the encoder on the src
encStates, context = self.model.encoder(src, src_lengths)
decStates = self.model.decoder.init_decoder_state(
src, context, encStates)
# (2) if a target is specified, compute the 'goldScore'
# (i.e. log likelihood) of the target under the model
tt = torch.cuda if self.opt.cuda else torch
goldScores = tt.FloatTensor(batch.batch_size).fill_(0)
decOut, decStates, attn = self.model.decoder(
tgt_in, context, decStates)
tgt_pad = self.fields["tgt"].vocab.stoi[onmt.IO.PAD_WORD]
for dec, tgt in zip(decOut, batch.tgt[1:].data):
# Log prob of each word.
out = self.model.generator.forward(dec)
tgt = tgt.unsqueeze(1)
scores = out.data.gather(1, tgt)
scores.masked_fill_(tgt.eq(tgt_pad), 0)
goldScores += scores
return goldScores
def translateBatch(self, batch, dataset):
beam_size = self.opt.beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
encStates, context = self.model.encoder(src, src_lengths) # return hidden_t, outputs
        if NOISE_TRANSELATE:
            # Perturb the encoder final (h, c) states with small uniform
            # noise before initializing the decoder (noisy decoding).
            newEncStates = (
                encStates[0] + Variable(torch.FloatTensor(
                    encStates[0].data.shape).uniform_(-0.2, 0.2)).cuda(),
                encStates[1] + Variable(torch.FloatTensor(
                    encStates[1].data.shape).uniform_(-0.2, 0.2)).cuda()
            )
            decStates = self.model.decoder.init_decoder_state(
                src, context, newEncStates)
        else:
            decStates = self.model.decoder.init_decoder_state(
                src, context, encStates)
# (1b) Initialize for the decoder.
def var(a): return Variable(a, volatile=True)
def rvar(a): return var(a.repeat(1, beam_size, 1))
# Repeat everything beam_size times.
context = rvar(context.data)
src = rvar(src.data)
srcMap = rvar(batch.src_map.data)
decStates.repeat_beam_size_times(beam_size)
scorer = None
# scorer=onmt.GNMTGlobalScorer(0.3, 0.4)
beam = [onmt.Beam(beam_size, n_best=self.opt.n_best,
cuda=self.opt.cuda,
vocab=self.fields["tgt"].vocab,
global_scorer=scorer)
for __ in range(batch_size)]
# (2) run the decoder to generate sentences, using beam search.
def bottle(m):
return m.view(batch_size * beam_size, -1)
def unbottle(m):
return m.view(beam_size, batch_size, -1)
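        # Shape bookkeeping: `bottle` flattens (beam x batch) into a single
        # batch dimension for the decoder/generator, while `unbottle`
        # restores the (beam, batch, -1) layout for per-beam score updates.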
for i in range(self.opt.max_sent_length):
if all((b.done() for b in beam)):
break
# Construct batch x beam_size nxt words.
# Get all the pending current beam words and arrange for forward.
inp = var(torch.stack([b.getCurrentState() for b in beam])
.t().contiguous().view(1, -1))
# Turn any copied words to UNKs
# 0 is unk
if self.copy_attn:
inp = inp.masked_fill(
inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
# Temporary kludge solution to handle changed dim expectation
# in the decoder
inp = inp.unsqueeze(2)
# Run one step.
decOut, decStates, attn = \
self.model.decoder(inp, context, decStates)
decOut = decOut.squeeze(0)
# decOut: beam x rnn_size
# (b) Compute a vector of batch*beam word scores.
if not self.copy_attn:
out = self.model.generator.forward(decOut).data
out = unbottle(out)
# beam x tgt_vocab
else:
out = self.model.generator.forward(decOut,
attn["copy"].squeeze(0),
srcMap)
# beam x (tgt_vocab + extra_vocab)
out = dataset.collapse_copy_scores(
unbottle(out.data),
batch, self.fields["tgt"].vocab)
# beam x tgt_vocab
out = out.log()
# (c) Advance each beam.
for j, b in enumerate(beam):
b.advance(out[:, j], unbottle(attn["std"]).data[:, j])
decStates.beam_update(j, b.getCurrentOrigin(), beam_size)
if "tgt" in batch.__dict__:
allGold = self._runTarget(batch, dataset)
else:
allGold = [0] * batch_size
# (3) Package everything up.
allHyps, allScores, allAttn = [], [], []
for b in beam:
n_best = self.opt.n_best
scores, ks = b.sortFinished(minimum=n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:n_best]):
hyp, att = b.getHyp(times, k)
hyps.append(hyp)
attn.append(att)
allHyps.append(hyps)
allScores.append(scores)
allAttn.append(attn)
return allHyps, allScores, allAttn, allGold
def translate(self, batch, data):
# (1) convert words to indexes
batch_size = batch.batch_size
# (2) translate
pred, predScore, attn, goldScore = self.translateBatch(batch, data)
assert(len(goldScore) == len(pred))
pred, predScore, attn, goldScore, i = list(zip(
*sorted(zip(pred, predScore, attn, goldScore,
batch.indices.data),
key=lambda x: x[-1])))
inds, perm = torch.sort(batch.indices.data)
# (3) convert indexes to words
predBatch, goldBatch = [], []
src = batch.src[0].data.index_select(1, perm)
if self.opt.tgt:
tgt = batch.tgt.data.index_select(1, perm)
for b in range(batch_size):
src_vocab = data.src_vocabs[inds[b]]
predBatch.append(
[self.buildTargetTokens(pred[b][n], src[:, b],
attn[b][n], src_vocab)
for n in range(self.opt.n_best)])
if self.opt.tgt:
goldBatch.append(
self.buildTargetTokens(tgt[1:, b], src[:, b],
None, None))
return predBatch, goldBatch, predScore, goldScore, attn, src
| 8,875 | 36.610169 | 122 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Utils.py | def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args)
def use_gpu(opt):
return (hasattr(opt, 'gpuid') and len(opt.gpuid) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
| 388 | 26.785714 | 62 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/IO.py | # -*- coding: utf-8 -*-
import codecs
from collections import Counter, defaultdict
from itertools import chain, count
import torch
import torchtext.data
import torchtext.vocab
PAD_WORD = '<blank>'
UNK = 0
BOS_WORD = '<s>'
EOS_WORD = '</s>'
def __getstate__(self):
return dict(self.__dict__, stoi=dict(self.stoi))
def __setstate__(self, state):
self.__dict__.update(state)
self.stoi = defaultdict(lambda: 0, self.stoi)
torchtext.vocab.Vocab.__getstate__ = __getstate__
torchtext.vocab.Vocab.__setstate__ = __setstate__
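# The monkey-patch above makes torchtext Vocab picklable: a defaultdict
# with a lambda default_factory cannot be serialized, so `stoi` is saved
# as a plain dict and restored as a defaultdict mapping unknowns to 0 (UNK).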
def load_fields(vocab):
vocab = dict(vocab)
n_src_features = len(collect_features(vocab, 'src'))
n_tgt_features = len(collect_features(vocab, 'tgt'))
fields = get_fields(n_src_features, n_tgt_features)
for k, v in vocab.items():
# Hack. Can't pickle defaultdict :(
v.stoi = defaultdict(lambda: 0, v.stoi)
fields[k].vocab = v
return fields
def collect_features(fields, side="src"):
assert side in ["src", "tgt"]
feats = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feats.append(key)
return feats
def extract_features(tokens):
"""
tokens: A list of tokens, where each token consists of a word,
optionally followed by u"│"-delimited features.
    returns: A sequence of words, a sequence of feature sequences, and
        the number of features per token (-1 if `tokens` is empty).
    """
if not tokens:
return [], [], -1
split_tokens = [token.split(u"│") for token in tokens]
split_tokens = [token for token in split_tokens if token[0]]
token_size = len(split_tokens[0])
assert all(len(token) == token_size for token in split_tokens), \
"all words must have the same number of features"
words_and_features = list(zip(*split_tokens))
words = words_and_features[0]
features = words_and_features[1:]
return words, features, token_size - 1
def read_corpus_file(path, truncate, side):
"""
path: location of a src or tgt file
truncate: maximum sequence length (0 for unlimited)
    yields: (example_dict, n_feats) pairs for each line
"""
with codecs.open(path, "r", "utf-8") as corpus_file:
for i, line in enumerate(corpus_file):
line = line.split()
if truncate:
line = line[:truncate]
words, feats, n_feats = extract_features(line)
example_dict = {side: words, "indices": i}
if feats:
prefix = side + "_feat_"
example_dict.update((prefix + str(j), f)
for j, f in enumerate(feats))
yield example_dict, n_feats
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return torchtext.vocab.Vocab(merged,
specials=[PAD_WORD, BOS_WORD, EOS_WORD],
max_size=vocab_size)
def make_features(batch, side):
"""
Args:
batch (Variable): a batch of source or target data.
side (str): for source or for target.
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
features = sorted(batch.__dict__[k]
for k in batch.__dict__ if feat_start in k)
levels = [data] + features
return torch.cat([level.unsqueeze(2) for level in levels], 2)
def save_vocab(fields):
vocab = []
for k, f in fields.items():
if 'vocab' in f.__dict__:
f.vocab.stoi = dict(f.vocab.stoi)
vocab.append((k, f.vocab))
return vocab
def collect_feature_dicts(fields, side):
assert side in ['src', 'tgt']
feature_dicts = []
for j in count():
key = side + "_feat_" + str(j)
if key not in fields:
break
feature_dicts.append(fields[key].vocab)
return feature_dicts
def get_fields(n_src_features, n_tgt_features):
"""
n_src_features: the number of source features to create Field objects for.
n_tgt_features: the number of target features to create Field objects for.
returns: A dictionary whose keys are strings and whose values are the
corresponding Field objects.
"""
fields = {}
fields["src"] = torchtext.data.Field(
pad_token=PAD_WORD,
include_lengths=True)
# fields = [("src_img", torchtext.data.Field(
# include_lengths=True))]
for j in range(n_src_features):
fields["src_feat_"+str(j)] = \
torchtext.data.Field(pad_token=PAD_WORD)
fields["tgt"] = torchtext.data.Field(
init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
for j in range(n_tgt_features):
fields["tgt_feat_"+str(j)] = \
torchtext.data.Field(init_token=BOS_WORD, eos_token=EOS_WORD,
pad_token=PAD_WORD)
def make_src(data, _):
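        # Build a one-hot alignment tensor of shape
        # (src_len x batch x dynamic_src_vocab): entry (j, i, t) is 1 when
        # token j of example i has index t in its batch-local copy vocabulary.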
src_size = max([t.size(0) for t in data])
src_vocab_size = max([t.max() for t in data]) + 1
alignment = torch.zeros(src_size, len(data), src_vocab_size)
for i, sent in enumerate(data):
for j, t in enumerate(sent):
alignment[j, i, t] = 1
return alignment
fields["src_map"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.FloatTensor,
postprocessing=make_src, sequential=False)
def make_tgt(data, _):
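        # Stack per-example alignment vectors into a (tgt_len x batch)
        # tensor, zero-padded to the longest target; nonzero entries index
        # the batch-local copy vocabulary.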
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment
fields["alignment"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
postprocessing=make_tgt, sequential=False)
fields["indices"] = torchtext.data.Field(
use_vocab=False, tensor_type=torch.LongTensor,
sequential=False)
return fields
def build_vocab(train, opt):
"""
train: an ONMTDataset
"""
fields = train.fields
fields["src"].build_vocab(train, max_size=opt.src_vocab_size,
min_freq=opt.src_words_min_frequency)
for j in range(train.n_src_feats):
fields["src_feat_" + str(j)].build_vocab(train)
fields["tgt"].build_vocab(train, max_size=opt.tgt_vocab_size,
min_freq=opt.tgt_words_min_frequency)
for j in range(train.n_tgt_feats):
fields["tgt_feat_" + str(j)].build_vocab(train)
# Merge the input and output vocabularies.
if opt.share_vocab:
# `tgt_vocab_size` is ignored when sharing vocabularies
merged_vocab = merge_vocabs(
[fields["src"].vocab, fields["tgt"].vocab],
vocab_size=opt.src_vocab_size)
fields["src"].vocab = merged_vocab
fields["tgt"].vocab = merged_vocab
def join_dicts(*args):
"""
args: dictionaries with disjoint keys
returns: a single dictionary that has the union of these keys
"""
return dict(chain(*[d.items() for d in args]))
def peek(seq):
"""
    seq: an iterator
returns: the first thing returned by calling next() on the iterator
and an iterator created by re-chaining that value to the beginning
of the iterator.
"""
first = next(seq)
return first, chain([first], seq)
class OrderedIterator(torchtext.data.Iterator):
def create_batches(self):
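        # Training: bucket examples of similar length together (torchtext
        # pool) to cut padding; evaluation: keep corpus order across
        # batches but sort within each batch by decreasing source length.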
if self.train:
self.batches = torchtext.data.pool(
self.data(), self.batch_size,
self.sort_key, self.batch_size_fn,
random_shuffler=self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
class ONMTDataset(torchtext.data.Dataset):
"""
Defines a dataset for machine translation.
An ONMTDataset is a collection that supports iteration over its
examples. The parent class supports indexing as well, but future
developments here may make that difficult (lazy iteration over
examples because of large datasets, for example).
"""
@staticmethod
def sort_key(ex):
"Sort in reverse size order"
return -len(ex.src)
def __init__(self, src_path, tgt_path, fields,
src_seq_length=0, tgt_seq_length=0,
src_seq_length_trunc=0, tgt_seq_length_trunc=0,
use_filter_pred=True, dynamic_dict=True,
src_img_dir=None, **kwargs):
"""
Create a translation dataset given paths and fields.
src_path: location of source-side data
        tgt_path: location of target-side data or None. It should be the
                  same length as the source-side data if it exists, but
                  at present this is not checked.
fields: a dictionary. keys are things like 'src', 'tgt', 'src_map',
and 'alignment'
src_img_dir: raises an error if not None because images are not
supported yet.
Initializes an ONMTDataset object with the following attributes:
        self.examples (may be a generator or a list):
A sequence of torchtext Example objects.
self.fields (dict):
A dictionary associating str keys with Field objects. Does not
necessarily have the same keys as the input fields.
        A dataset supports iteration over all the examples it
contains.
"""
assert src_img_dir is None, "img data is not finished"
# self.src_vocabs: mutated in dynamic_dict, used in
# collapse_copy_scores and in Translator.py
self.src_vocabs = []
src_truncate = src_seq_length_trunc
src_examples = read_corpus_file(src_path, src_truncate, "src")
(_, src_feats), src_examples = peek(src_examples)
src_examples = (ex for ex, nfeats in src_examples)
self.n_src_feats = src_feats
# if tgt_path exists, then we need to do the same thing as we did
# for the source data
if tgt_path is not None:
tgt_truncate = tgt_seq_length_trunc
tgt_examples = read_corpus_file(tgt_path, tgt_truncate, "tgt")
(_, tgt_feats), tgt_examples = peek(tgt_examples)
tgt_examples = (ex for ex, nfeats in tgt_examples)
self.n_tgt_feats = tgt_feats
else:
self.n_tgt_feats = 0
tgt_examples = None
# examples: one for each src line or (src, tgt) line pair.
# Each element is a dictionary whose keys represent at minimum
# the src tokens and their indices and potentially also the
# src and tgt features and alignment information.
if tgt_examples is not None:
examples = (join_dicts(src, tgt)
for src, tgt in zip(src_examples, tgt_examples))
else:
examples = src_examples
if dynamic_dict:
examples = self.dynamic_dict(examples)
# Peek at the first to see which fields are used.
ex, examples = peek(examples)
keys = ex.keys()
fields = [(k, fields[k]) for k in keys]
example_values = ([ex[k] for k in keys] for ex in examples)
out_examples = (torchtext.data.Example.fromlist(ex_values, fields)
for ex_values in example_values)
def filter_pred(example):
return 0 < len(example.src) <= src_seq_length \
and 0 < len(example.tgt) <= tgt_seq_length
super(ONMTDataset, self).__init__(
out_examples,
fields,
filter_pred if use_filter_pred else None
)
def dynamic_dict(self, examples):
for example in examples:
src = example["src"]
src_vocab = torchtext.vocab.Vocab(Counter(src))
self.src_vocabs.append(src_vocab)
# mapping source tokens to indices in the dynamic dict
src_map = torch.LongTensor([src_vocab.stoi[w] for w in src])
example["src_map"] = src_map
if "tgt" in example:
tgt = example["tgt"]
mask = torch.LongTensor(
[0] + [src_vocab.stoi[w] for w in tgt] + [0])
example["alignment"] = mask
yield example
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __reduce_ex__(self, proto):
"This is a hack. Something is broken with torch pickle."
        return super(ONMTDataset, self).__reduce_ex__(proto)
def collapse_copy_scores(self, scores, batch, tgt_vocab):
"""
Given scores from an expanded dictionary
        corresponding to a batch, sums together copies, merging
        with a dictionary word when it is ambiguous.
"""
offset = len(tgt_vocab)
for b in range(batch.batch_size):
index = batch.indices.data[b]
src_vocab = self.src_vocabs[index]
for i in range(1, len(src_vocab)):
sw = src_vocab.itos[i]
ti = tgt_vocab.stoi[sw]
if ti != 0:
scores[:, b, ti] += scores[:, b, offset + i]
scores[:, b, offset + i].fill_(1e-20)
return scores
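    # Illustrative example (assumed sizes, not part of the original source):
    # with len(tgt_vocab) == 5, the copy score of the i-th word in a source
    # vocab lives at column 5 + i; if that word also exists in tgt_vocab at
    # index ti != 0, its copy mass is folded into column ti and the copy
    # column is reset to a tiny constant so log-probabilities stay finite.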
def load_image_libs():
"Conditional import of torch image libs."
global Image, transforms
from PIL import Image
from torchvision import transforms
| 14,171 | 33.231884 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/ModelConstructor.py | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import torch.nn as nn
import onmt
import onmt.Models
import onmt.modules
from onmt.Models import NMTModel, MeanEncoder, RNNEncoder, \
StdRNNDecoder, InputFeedRNNDecoder
from onmt.modules import Embeddings, ImageEncoder, CopyGenerator, \
TransformerEncoder, TransformerDecoder, \
CNNEncoder, CNNDecoder
def make_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
"""
Make an Embeddings instance.
Args:
opt: the option in current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): make Embeddings for encoder or decoder?
"""
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[onmt.IO.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[onmt.IO.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(embedding_dim,
opt.position_encoding,
opt.feat_merge,
opt.feat_vec_exponent,
opt.feat_vec_size,
opt.dropout,
word_padding_idx,
feats_padding_idx,
num_word_embeddings,
num_feat_embeddings)
def make_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.rnn_size,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.rnn_size, opt.dropout, embeddings)
def make_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.rnn_size,
opt.global_attention,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings)
def make_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
        checkpoint: the model generated by the train phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img"], \
("Unsupported model type %s" % (model_opt.model_type))
# Make encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = onmt.IO.collect_feature_dicts(fields, 'src')
src_embeddings = make_embeddings(model_opt, src_dict,
feature_dicts)
encoder = make_encoder(model_opt, src_embeddings)
else:
encoder = ImageEncoder(model_opt.layers,
model_opt.brnn,
model_opt.rnn_size,
model_opt.dropout)
# Make decoder.
tgt_dict = fields["tgt"].vocab
# TODO: prepare for a future where tgt features are possible.
feature_dicts = onmt.IO.collect_feature_dicts(fields, 'tgt')
tgt_embeddings = make_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required
if model_opt.share_embeddings:
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = make_decoder(model_opt, tgt_embeddings)
# Make NMTModel(= encoder + decoder).
model = NMTModel(encoder, decoder)
# Make Generator.
if not model_opt.copy_attn:
generator = nn.Sequential(
nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)),
nn.LogSoftmax())
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt, fields["src"].vocab,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
print('Loading model parameters.')
model.load_state_dict(checkpoint['model'])
generator.load_state_dict(checkpoint['generator'])
else:
if model_opt.param_init != 0.0:
            print('Initializing model parameters.')
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
# Make the whole model leverage GPU if indicated to do so.
if gpu:
model.cuda()
else:
model.cpu()
return model
| 7,331 | 37.589474 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/__init__.py | import onmt.IO
import onmt.Models
import onmt.Loss
from onmt.Trainer import Trainer, Statistics
from onmt.Translator import Translator
from onmt.Optim import Optim
from onmt.Beam import Beam, GNMTGlobalScorer
# For flake8 compatibility
__all__ = [onmt.Loss, onmt.IO, onmt.Models, Trainer, Translator,
Optim, Beam, Statistics, GNMTGlobalScorer]
| 357 | 26.538462 | 64 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Trainer.py | from __future__ import division
"""
This is the loadable seq2seq trainer library that is
in charge of training details, loss compute, and statistics.
See train.py for a use case of this library.
Note!!! To make this a general library, we implement *only*
mechanism things here (i.e. what to do), and leave the strategy
things to users (i.e. how to do it). Also see train.py (one of the
users of this library) for the strategy things we do.
"""
import time
import sys
import math
import torch
import torch.nn as nn
import onmt
import onmt.modules
class Statistics(object):
"""
Train/validate loss statistics.
"""
def __init__(self, loss=0, n_words=0, n_correct=0):
self.loss = loss
self.n_words = n_words
self.n_correct = n_correct
self.n_src_words = 0
self.start_time = time.time()
def update(self, stat):
self.loss += stat.loss
self.n_words += stat.n_words
self.n_correct += stat.n_correct
def accuracy(self):
return 100 * (self.n_correct / self.n_words)
def ppl(self):
return math.exp(min(self.loss / self.n_words, 100))
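    # Worked example (illustrative, not part of the original source): with
    # loss == 230.0 summed over n_words == 100 tokens, the per-token
    # cross-entropy is 2.3 nats, so ppl() == exp(2.3) ~= 9.97; the
    # min(..., 100) guard only caps the exponent to avoid overflow early on.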
def elapsed_time(self):
return time.time() - self.start_time
def output(self, epoch, batch, n_batches, start):
t = self.elapsed_time()
print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " +
"%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed") %
(epoch, batch, n_batches,
self.accuracy(),
self.ppl(),
self.n_src_words / (t + 1e-5),
self.n_words / (t + 1e-5),
time.time() - start))
sys.stdout.flush()
def log(self, prefix, experiment, lr):
t = self.elapsed_time()
experiment.add_scalar_value(prefix + "_ppl", self.ppl())
experiment.add_scalar_value(prefix + "_accuracy", self.accuracy())
experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t)
experiment.add_scalar_value(prefix + "_lr", lr)
class Trainer(object):
def __init__(self, model, train_iter, valid_iter,
train_loss, valid_loss, optim,
trunc_size, shard_size):
"""
Args:
model: the seq2seq model.
train_iter: the train data iterator.
valid_iter: the validate data iterator.
train_loss: the train side LossCompute object for computing loss.
valid_loss: the valid side LossCompute object for computing loss.
optim: the optimizer responsible for lr update.
            trunc_size: a batch is divided into several chunks of this
                size for truncated BPTT.
shard_size: compute loss in shards of this size for efficiency.
"""
# Basic attributes.
self.model = model
self.train_iter = train_iter
self.valid_iter = valid_iter
self.train_loss = train_loss
self.valid_loss = valid_loss
self.optim = optim
self.trunc_size = trunc_size
self.shard_size = shard_size
# Set model in training mode.
self.model.train()
def train(self, epoch, report_func=None):
""" Called for each epoch to train. """
total_stats = Statistics()
report_stats = Statistics()
for i, batch in enumerate(self.train_iter):
target_size = batch.tgt.size(0)
# Truncated BPTT
trunc_size = self.trunc_size if self.trunc_size else target_size
dec_state = None
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt_outer = onmt.IO.make_features(batch, 'tgt')
report_stats.n_src_words += src_lengths.sum()
for j in range(0, target_size-1, trunc_size):
# 1. Create truncated target.
tgt = tgt_outer[j: j + trunc_size]
# 2. F-prop all but generator.
self.model.zero_grad()
outputs, attns, dec_state = \
self.model(src, tgt, src_lengths, dec_state)
# 3. Compute loss in shards for memory efficiency.
batch_stats = self.train_loss.sharded_compute_loss(
batch, outputs, attns, j,
trunc_size, self.shard_size)
# 4. Update the parameters and statistics.
self.optim.step()
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# If truncated, don't backprop fully.
if dec_state is not None:
dec_state.detach()
if report_func is not None:
report_stats = report_func(
epoch, i, len(self.train_iter),
total_stats.start_time, self.optim.lr, report_stats)
return total_stats
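    # Illustrative note (not part of the original source): with trunc_size=t,
    # a target of length T is processed in ceil((T-1)/t) windows; dec_state
    # is detached between windows, so gradients flow freely within a window
    # but truncated BPTT stops them at window boundaries.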
def validate(self):
""" Called for each epoch to validate. """
# Set model in validating mode.
self.model.eval()
stats = Statistics()
for batch in self.valid_iter:
_, src_lengths = batch.src
src = onmt.IO.make_features(batch, 'src')
tgt = onmt.IO.make_features(batch, 'tgt')
# F-prop through the model.
outputs, attns, _ = self.model(src, tgt, src_lengths)
# Compute loss.
batch_stats = self.valid_loss.monolithic_compute_loss(
batch, outputs, attns)
# Update statistics.
stats.update(batch_stats)
# Set model back to training mode.
self.model.train()
return stats
def epoch_step(self, ppl, epoch):
""" Called for each epoch to update learning rate. """
return self.optim.updateLearningRate(ppl, epoch)
def drop_checkpoint(self, opt, epoch, fields, valid_stats):
""" Called conditionally each epoch to save a snapshot. """
real_model = (self.model.module
if isinstance(self.model, nn.DataParallel)
else self.model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': onmt.IO.save_vocab(fields),
'opt': opt,
'epoch': epoch,
'optim': self.optim
}
torch.save(checkpoint, '%s.pt' % opt.save_model)
| 6,823 | 33.994872 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Optim.py | import torch.optim as optim
from torch.nn.utils import clip_grad_norm
class Optim(object):
def set_parameters(self, params):
self.params = [p for p in params if p.requires_grad]
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.lr)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.lr)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.lr)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.lr,
betas=self.betas, eps=1e-9)
else:
raise RuntimeError("Invalid optim method: " + self.method)
def __init__(self, method, lr, max_grad_norm,
lr_decay=1, start_decay_at=None,
beta1=0.9, beta2=0.98,
opt=None):
self.last_ppl = None
self.lr = lr
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_at = start_decay_at
self.start_decay = False
self._step = 0
self.betas = [beta1, beta2]
self.opt = opt
def _setRate(self, lr):
self.lr = lr
self.optimizer.param_groups[0]['lr'] = self.lr
def step(self):
"Compute gradients norm."
self._step += 1
# Decay method used in tensor2tensor.
if self.opt.__dict__.get("decay_method", "") == "noam":
self._setRate(
self.opt.learning_rate *
(self.opt.rnn_size ** (-0.5) *
min(self._step ** (-0.5),
self._step * self.opt.warmup_steps**(-1.5))))
if self.max_grad_norm:
clip_grad_norm(self.params, self.max_grad_norm)
self.optimizer.step()
def updateLearningRate(self, ppl, epoch):
"""
Decay learning rate if val perf does not improve
or we hit the start_decay_at limit.
"""
if self.start_decay_at is not None and epoch >= self.start_decay_at:
self.start_decay = True
if self.last_ppl is not None and ppl > self.last_ppl:
self.start_decay = True
if self.start_decay:
self.lr = self.lr * self.lr_decay
print("Decaying learning rate to %g" % self.lr)
self.last_ppl = ppl
self.optimizer.param_groups[0]['lr'] = self.lr
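# Illustrative usage sketch (hypothetical names, not part of the original
# file). Note that step() dereferences self.opt, so the training script must
# pass the full option namespace via the `opt` keyword:
#   >>> optim = Optim('sgd', lr=1.0, max_grad_norm=5, lr_decay=0.5,
#   ...               start_decay_at=8, opt=train_opts)
#   >>> optim.set_parameters(model.parameters())    # once per run
#   >>> loss.backward(); optim.step()               # once per batch
#   >>> optim.updateLearningRate(valid_ppl, epoch)  # once per epoch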
| 2,490 | 33.123288 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/Models.py | from __future__ import division
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
import onmt
from onmt.Utils import aeq
class EncoderBase(nn.Module):
"""
EncoderBase class for sharing code among various encoder.
"""
def _check_args(self, input, lengths=None, hidden=None):
s_len, n_batch, n_feats = input.size()
if lengths is not None:
n_batch_, = lengths.size()
aeq(n_batch, n_batch_)
def forward(self, input, lengths=None, hidden=None):
"""
Args:
input (LongTensor): len x batch x nfeat.
lengths (LongTensor): batch
hidden: Initial hidden state.
Returns:
hidden_t (Variable): Pair of layers x batch x rnn_size - final
encoder state
outputs (FloatTensor): len x batch x rnn_size - Memory bank
"""
raise NotImplementedError
class MeanEncoder(EncoderBase):
""" A trivial encoder without RNN, just takes mean as final state. """
def __init__(self, num_layers, embeddings):
super(MeanEncoder, self).__init__()
self.num_layers = num_layers
self.embeddings = embeddings
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns. """
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)
return (mean, mean), emb
class RNNEncoder(EncoderBase):
""" The standard RNN encoder. """
def __init__(self, rnn_type, bidirectional, num_layers,
hidden_size, dropout, embeddings):
super(RNNEncoder, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.embeddings = embeddings
self.no_pack_padded_seq = False
# Use pytorch version when available.
if rnn_type == "SRU":
# SRU doesn't support PackedSequence.
self.no_pack_padded_seq = True
self.rnn = onmt.modules.SRU(
input_size=embeddings.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
else:
self.rnn = getattr(nn, rnn_type)(
input_size=embeddings.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
packed_emb = emb
if lengths is not None and not self.no_pack_padded_seq:
# Lengths data is wrapped inside a Variable.
lengths = lengths.view(-1).tolist()
packed_emb = pack(emb, lengths)
outputs, hidden_t = self.rnn(packed_emb, hidden)
if lengths is not None and not self.no_pack_padded_seq:
outputs = unpack(outputs)[0]
return hidden_t, outputs
class RNNDecoderBase(nn.Module):
"""
RNN decoder base class.
"""
def __init__(self, rnn_type, bidirectional_encoder, num_layers,
hidden_size, attn_type, coverage_attn, context_gate,
copy_attn, dropout, embeddings):
super(RNNDecoderBase, self).__init__()
# Basic attributes.
self.decoder_type = 'rnn'
self.bidirectional_encoder = bidirectional_encoder
self.num_layers = num_layers
self.hidden_size = hidden_size
self.embeddings = embeddings
self.dropout = nn.Dropout(dropout)
# Build the RNN.
self.rnn = self._build_rnn(rnn_type, self._input_size, hidden_size,
num_layers, dropout)
# Set up the context gate.
self.context_gate = None
if context_gate is not None:
self.context_gate = onmt.modules.ContextGateFactory(
context_gate, self._input_size,
hidden_size, hidden_size, hidden_size
)
# Set up the standard attention.
self._coverage = coverage_attn
self.attn = onmt.modules.GlobalAttention(
hidden_size, coverage=coverage_attn,
attn_type=attn_type
)
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type
)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the decoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# Args Check
assert isinstance(state, RNNDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
# END Args Check
# Run the forward pass of the RNN.
hidden, outputs, attns, coverage = \
self._run_forward_pass(input, context, state)
# Update the state with the result.
final_output = outputs[-1]
state.update_state(hidden, final_output.unsqueeze(0),
coverage.unsqueeze(0)
if coverage is not None else None)
# Concatenates sequence of tensors along a new dimension.
outputs = torch.stack(outputs)
for k in attns:
attns[k] = torch.stack(attns[k])
return outputs, state, attns
def _fix_enc_hidden(self, h):
"""
The encoder hidden is (layers*directions) x batch x dim.
We need to convert it to layers x batch x (directions*dim).
"""
if self.bidirectional_encoder:
h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
return h
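    # Shape walkthrough (illustrative, not part of the original source): for
    # a 2-layer bidirectional encoder with batch 8 and dim 250, h arrives as
    # (4, 8, 250); the interleaved forward/backward states are concatenated
    # into (2, 8, 500), matching the unidirectional decoder's layout.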
def init_decoder_state(self, src, context, enc_hidden):
if isinstance(enc_hidden, tuple): # LSTM
return RNNDecoderState(context, self.hidden_size,
tuple([self._fix_enc_hidden(enc_hidden[i])
for i in range(len(enc_hidden))]))
else: # GRU
return RNNDecoderState(context, self.hidden_size,
self._fix_enc_hidden(enc_hidden))
class StdRNNDecoder(RNNDecoderBase):
"""
    Standard RNN decoder, with attention.
Currently no 'coverage_attn' and 'copy_attn' support.
"""
def _run_forward_pass(self, input, context, state):
"""
Private helper for running the specific RNN forward pass.
        Must be overridden by all subclasses.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
RNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
hidden (Variable): final hidden state from the decoder.
outputs ([FloatTensor]): an array of output of every time
step from the decoder.
attns (dict of (str, [FloatTensor]): a dictionary of different
type of attention Tensor array of every time
step from the decoder.
coverage (FloatTensor, optional): coverage from the decoder.
"""
assert not self._copy # TODO, no support yet.
assert not self._coverage # TODO, no support yet.
# Initialize local and return variables.
outputs = []
attns = {"std": []}
coverage = None
emb = self.embeddings(input)
# Run the forward pass of the RNN.
if isinstance(self.rnn, nn.GRU):
rnn_output, hidden = self.rnn(emb, state.hidden[0])
else:
rnn_output, hidden = self.rnn(emb, state.hidden)
# Result Check
input_len, input_batch, _ = input.size()
output_len, output_batch, _ = rnn_output.size()
aeq(input_len, output_len)
aeq(input_batch, output_batch)
# END Result Check
# Calculate the attention.
attn_outputs, attn_scores = self.attn(
rnn_output.transpose(0, 1).contiguous(), # (output_len, batch, d)
context.transpose(0, 1) # (contxt_len, batch, d)
)
attns["std"] = attn_scores
# Calculate the context gate.
if self.context_gate is not None:
outputs = self.context_gate(
emb.view(-1, emb.size(2)),
rnn_output.view(-1, rnn_output.size(2)),
attn_outputs.view(-1, attn_outputs.size(2))
)
outputs = outputs.view(input_len, input_batch, self.hidden_size)
outputs = self.dropout(outputs)
else:
outputs = self.dropout(attn_outputs) # (input_len, batch, d)
# Return result.
return hidden, outputs, attns, coverage
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
"""
Private helper for building standard decoder RNN.
"""
# Use pytorch version when available.
if rnn_type == "SRU":
return onmt.modules.SRU(
input_size, hidden_size,
num_layers=num_layers,
dropout=dropout)
return getattr(nn, rnn_type)(
input_size, hidden_size,
num_layers=num_layers,
dropout=dropout)
@property
def _input_size(self):
"""
Private helper returning the number of expected features.
"""
return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
"""
    Standard RNN decoder, with input feed and attention.
"""
def _run_forward_pass(self, input, context, state):
"""
See StdRNNDecoder._run_forward_pass() for description
of arguments and return values.
"""
# Additional args check.
output = state.input_feed.squeeze(0)
output_batch, _ = output.size()
input_len, input_batch, _ = input.size()
aeq(input_batch, output_batch)
# END Additional args check.
# Initialize local and return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
if self._coverage:
attns["coverage"] = []
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
hidden = state.hidden
coverage = state.coverage.squeeze(0) \
if state.coverage is not None else None
# Input feed concatenates hidden state with
# input at every time step.
for i, emb_t in enumerate(emb.split(1)):
emb_t = emb_t.squeeze(0)
emb_t = torch.cat([emb_t, output], 1)
rnn_output, hidden = self.rnn(emb_t, hidden)
attn_output, attn = self.attn(rnn_output,
context.transpose(0, 1))
if self.context_gate is not None:
output = self.context_gate(
emb_t, rnn_output, attn_output
)
output = self.dropout(output)
else:
output = self.dropout(attn_output)
outputs += [output]
attns["std"] += [attn]
# Update the coverage attention.
if self._coverage:
coverage = coverage + attn \
if coverage is not None else attn
attns["coverage"] += [coverage]
# Run the forward pass of the copy attention layer.
if self._copy:
_, copy_attn = self.copy_attn(output,
context.transpose(0, 1))
attns["copy"] += [copy_attn]
# Return result.
return hidden, outputs, attns, coverage
def _build_rnn(self, rnn_type, input_size,
hidden_size, num_layers, dropout):
assert not rnn_type == "SRU", "SRU doesn't support input feed! " \
"Please set -input_feed 0!"
if rnn_type == "LSTM":
stacked_cell = onmt.modules.StackedLSTM
else:
stacked_cell = onmt.modules.StackedGRU
return stacked_cell(num_layers, input_size,
hidden_size, dropout)
@property
def _input_size(self):
"""
Using input feed by concatenating input with attention vectors.
"""
return self.embeddings.embedding_size + self.hidden_size
class NMTModel(nn.Module):
"""
The encoder + decoder Neural Machine Translation Model.
"""
def __init__(self, encoder, decoder, multigpu=False):
"""
Args:
encoder(*Encoder): the various encoder.
decoder(*Decoder): the various decoder.
            multigpu(bool): run in parallel on multiple GPUs?
"""
self.multigpu = multigpu
super(NMTModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, src, tgt, lengths, dec_state=None):
"""
Args:
src(FloatTensor): a sequence of source tensors with
optional feature tensors of size (len x batch).
tgt(FloatTensor): a sequence of target tensors with
optional feature tensors of size (len x batch).
lengths([int]): an array of the src length.
dec_state: A decoder state object
Returns:
outputs (FloatTensor): (len x batch x hidden_size): decoder outputs
attns (FloatTensor): Dictionary of (src_len x batch)
dec_hidden (FloatTensor): tuple (1 x batch x hidden_size)
Init hidden state
"""
        tgt = tgt[:-1]  # exclude last target from inputs
enc_hidden, context = self.encoder(src, lengths)
enc_state = self.decoder.init_decoder_state(src, context, enc_hidden)
out, dec_state, attns = self.decoder(tgt, context,
enc_state if dec_state is None
else dec_state)
if self.multigpu:
# Not yet supported on multi-gpu
dec_state = None
attns = None
return out, attns, dec_state
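    # Illustrative usage sketch (hypothetical names, not part of the original
    # source):
    #   >>> model = NMTModel(encoder, decoder)
    #   >>> outputs, attns, dec_state = model(src, tgt, src_lengths)
    # where src and tgt are (len, batch, nfeat) LongTensors and src_lengths
    # is a (batch,) LongTensor of source sentence lengths.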
class DecoderState(object):
"""
DecoderState is a base class for models, used during translation
for storing translation states.
"""
def detach(self):
"""
Detaches all Variables from the graph
that created it, making it a leaf.
"""
for h in self._all:
if h is not None:
h.detach_()
def beam_update(self, idx, positions, beam_size):
""" Update when beam advances. """
for e in self._all:
a, br, d = e.size()
sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]
sentStates.data.copy_(
sentStates.data.index_select(1, positions))
class RNNDecoderState(DecoderState):
def __init__(self, context, hidden_size, rnnstate):
"""
Args:
context (FloatTensor): output from the encoder of size
len x batch x rnn_size.
hidden_size (int): the size of hidden layer of the decoder.
rnnstate (Variable): final hidden state from the encoder.
transformed to shape: layers x batch x (directions*dim).
input_feed (FloatTensor): output from last layer of the decoder.
coverage (FloatTensor): coverage output from the decoder.
"""
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.coverage = None
# Init the input feed.
batch_size = context.size(1)
h_size = (batch_size, hidden_size)
self.input_feed = Variable(context.data.new(*h_size).zero_(),
requires_grad=False).unsqueeze(0)
@property
def _all(self):
return self.hidden + (self.input_feed,)
def update_state(self, rnnstate, input_feed, coverage):
if not isinstance(rnnstate, tuple):
self.hidden = (rnnstate,)
else:
self.hidden = rnnstate
self.input_feed = input_feed
self.coverage = coverage
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
vars = [Variable(e.data.repeat(1, beam_size, 1), volatile=True)
for e in self._all]
self.hidden = tuple(vars[:-1])
self.input_feed = vars[-1]
| 18,492 | 36.209256 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/ConvMultiStepAttention.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def seq_linear(linear, x):
# linear transform for 3-d tensor
batch, hidden_size, length, _ = x.size()
h = linear(torch.transpose(x, 1, 2).contiguous().view(
batch * length, hidden_size))
return torch.transpose(h.view(batch, length, hidden_size, 1), 1, 2)
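# Shape note (illustrative, not part of the original source): x of shape
# (batch, hidden_size, length, 1) is flattened to (batch*length, hidden_size),
# passed through `linear`, and restored to (batch, hidden_size, length, 1);
# i.e. the same linear map is applied independently at every time step.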
class ConvMultiStepAttention(nn.Module):
def __init__(self, input_size):
super(ConvMultiStepAttention, self).__init__()
self.linear_in = nn.Linear(input_size, input_size)
self.mask = None
def applyMask(self, mask):
self.mask = mask
def forward(self, base_target_emb, input, encoder_out_top,
encoder_out_combine):
"""
        It's similar to Luong attention.
        Conv attention takes a key matrix, a value matrix and a query vector.
        Attention weights are computed from the key matrix and the query
        vector, then used for a weighted sum over the value matrix. The same
        operation is applied in each decoder conv layer.
        Args:
            base_target_emb: target embedding tensor
            input: output of the decoder conv
            encoder_out_top: the key matrix for computing the attention
                weights, which is the top output of the encoder conv
            encoder_out_combine: the value matrix for the attention-weighted
                sum, which combines the base embeddings and the top output
                of the encoder conv
"""
# checks
batch, channel, height, width = base_target_emb.size()
batch_, channel_, height_, width_ = input.size()
aeq(batch, batch_)
aeq(height, height_)
enc_batch, enc_channel, enc_height = encoder_out_top.size()
enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
aeq(enc_batch, enc_batch_)
aeq(enc_height, enc_height_)
preatt = seq_linear(self.linear_in, input)
target = (base_target_emb + preatt) * SCALE_WEIGHT
target = torch.squeeze(target, 3)
target = torch.transpose(target, 1, 2)
pre_attn = torch.bmm(target, encoder_out_top)
if self.mask is not None:
pre_attn.data.masked_fill_(self.mask, -float('inf'))
pre_attn = pre_attn.transpose(0, 2)
attn = F.softmax(pre_attn)
attn = attn.transpose(0, 2).contiguous()
context_output = torch.bmm(
attn, torch.transpose(encoder_out_combine, 1, 2))
context_output = torch.transpose(
torch.unsqueeze(context_output, 3), 1, 2)
return context_output, attn
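# Shape note (illustrative, not part of the original source): target is
# squeezed and transposed to (batch, tgt_len, channel); bmm with
# encoder_out_top (batch, channel, src_len) yields attention of shape
# (batch, tgt_len, src_len), which is applied to encoder_out_combine and
# reshaped back to (batch, channel, tgt_len, 1).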
| 2,610 | 34.767123 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Transformer.py | """
Implementation of "Attention is All You Need"
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import onmt
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
MAX_SIZE = 5000
class PositionwiseFeedForward(nn.Module):
""" A two-layer Feed-Forward-Network."""
def __init__(self, size, hidden_size, dropout=0.1):
"""
Args:
size(int): the size of input for the first-layer of the FFN.
            hidden_size(int): the hidden layer size of the second layer
                              of the FFN.
            dropout(float): dropout probability (0-1.0).
"""
super(PositionwiseFeedForward, self).__init__()
self.w_1 = onmt.modules.BottleLinear(size, hidden_size)
self.w_2 = onmt.modules.BottleLinear(hidden_size, size)
self.layer_norm = onmt.modules.BottleLayerNorm(size)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
output = self.dropout(self.w_2(self.relu(self.w_1(x))))
return self.layer_norm(output + residual)
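    # Illustrative note (not part of the original source): this computes
    # LayerNorm(x + Dropout(W_2 * ReLU(W_1 * x))), i.e. a post-norm residual
    # feed-forward block as in "Attention is All You Need", position-wise.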
class TransformerEncoderLayer(nn.Module):
def __init__(self, size, dropout,
head_count=8, hidden_size=2048):
"""
Args:
size(int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
            dropout(float): dropout probability (0-1.0).
head_count(int): the number of head for MultiHeadedAttention.
hidden_size(int): the second-layer of the PositionwiseFeedForward.
"""
super(TransformerEncoderLayer, self).__init__()
self.self_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.feed_forward = PositionwiseFeedForward(size,
hidden_size,
dropout)
def forward(self, input, mask):
mid, _ = self.self_attn(input, input, input, mask=mask)
out = self.feed_forward(mid)
return out
class TransformerEncoder(EncoderBase):
"""
The Transformer encoder from "Attention is All You Need".
"""
def __init__(self, num_layers, hidden_size,
dropout, embeddings):
super(TransformerEncoder, self).__init__()
self.num_layers = num_layers
self.embeddings = embeddings
self.transformer = nn.ModuleList(
[TransformerEncoderLayer(hidden_size, dropout)
for i in range(num_layers)])
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, n_batch, emb_dim = emb.size()
out = emb.transpose(0, 1).contiguous()
words = input[:, :, 0].transpose(0, 1)
# CHECKS
out_batch, out_len, _ = out.size()
w_batch, w_len = words.size()
aeq(out_batch, w_batch)
aeq(out_len, w_len)
# END CHECKS
# Make mask.
padding_idx = self.embeddings.word_padding_idx
mask = words.data.eq(padding_idx).unsqueeze(1) \
.expand(w_batch, w_len, w_len)
        # Run the forward pass of every layer of the transformer.
for i in range(self.num_layers):
out = self.transformer[i](out, mask)
return Variable(emb.data), out.transpose(0, 1).contiguous()
class TransformerDecoderLayer(nn.Module):
def __init__(self, size, dropout,
head_count=8, hidden_size=2048):
"""
Args:
size(int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
            dropout(float): dropout probability (0-1.0).
head_count(int): the number of head for MultiHeadedAttention.
hidden_size(int): the second-layer of the PositionwiseFeedForward.
"""
super(TransformerDecoderLayer, self).__init__()
self.self_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.context_attn = onmt.modules.MultiHeadedAttention(
head_count, size, p=dropout)
self.feed_forward = PositionwiseFeedForward(size,
hidden_size,
dropout)
self.dropout = dropout
mask = self._get_attn_subsequent_mask(MAX_SIZE)
# Register self.mask as a buffer in TransformerDecoderLayer, so
# it gets TransformerDecoderLayer's cuda behavior automatically.
self.register_buffer('mask', mask)
def forward(self, input, context, src_pad_mask, tgt_pad_mask):
# Args Checks
input_batch, input_len, _ = input.size()
contxt_batch, contxt_len, _ = context.size()
aeq(input_batch, contxt_batch)
src_batch, t_len, s_len = src_pad_mask.size()
tgt_batch, t_len_, t_len__ = tgt_pad_mask.size()
aeq(input_batch, contxt_batch, src_batch, tgt_batch)
aeq(t_len, t_len_, t_len__, input_len)
aeq(s_len, contxt_len)
# END Args Checks
dec_mask = torch.gt(tgt_pad_mask + self.mask[:, :tgt_pad_mask.size(1),
:tgt_pad_mask.size(1)]
.expand_as(tgt_pad_mask), 0)
query, attn = self.self_attn(input, input, input, mask=dec_mask)
mid, attn = self.context_attn(context, context, query,
mask=src_pad_mask)
output = self.feed_forward(mid)
# CHECKS
output_batch, output_len, _ = output.size()
aeq(input_len, output_len)
aeq(contxt_batch, output_batch)
n_batch_, t_len_, s_len_ = attn.size()
aeq(input_batch, n_batch_)
aeq(contxt_len, s_len_)
aeq(input_len, t_len_)
# END CHECKS
return output, attn
def _get_attn_subsequent_mask(self, size):
''' Get an attention mask to avoid using the subsequent info.'''
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
subsequent_mask = torch.from_numpy(subsequent_mask)
return subsequent_mask
class TransformerDecoder(nn.Module):
"""
The Transformer decoder from "Attention is All You Need".
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, dropout, embeddings):
super(TransformerDecoder, self).__init__()
# Basic attributes.
self.decoder_type = 'transformer'
self.num_layers = num_layers
self.embeddings = embeddings
# Build TransformerDecoder.
self.transformer_layers = nn.ModuleList(
[TransformerDecoderLayer(hidden_size, dropout)
for _ in range(num_layers)])
# TransformerDecoder has its own attention mechanism.
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the TransformerDecoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder RNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# CHECKS
assert isinstance(state, TransformerDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
if state.previous_input is not None:
input = torch.cat([state.previous_input, input], 0)
src = state.src
src_words = src[:, :, 0].transpose(0, 1)
tgt_words = input[:, :, 0].transpose(0, 1)
src_batch, src_len = src_words.size()
tgt_batch, tgt_len = tgt_words.size()
aeq(input_batch, contxt_batch, src_batch, tgt_batch)
aeq(contxt_len, src_len)
# aeq(input_len, tgt_len)
# END CHECKS
# Initialize return variables.
outputs = []
attns = {"std": []}
if self._copy:
attns["copy"] = []
# Run the forward pass of the TransformerDecoder.
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
output = emb.transpose(0, 1).contiguous()
src_context = context.transpose(0, 1).contiguous()
padding_idx = self.embeddings.word_padding_idx
src_pad_mask = src_words.data.eq(padding_idx).unsqueeze(1) \
.expand(src_batch, tgt_len, src_len)
tgt_pad_mask = tgt_words.data.eq(padding_idx).unsqueeze(1) \
.expand(tgt_batch, tgt_len, tgt_len)
for i in range(self.num_layers):
output, attn \
= self.transformer_layers[i](output, src_context,
src_pad_mask, tgt_pad_mask)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(input)
return outputs, state, attns
def init_decoder_state(self, src, context, enc_hidden):
return TransformerDecoderState(src)
class TransformerDecoderState(DecoderState):
def __init__(self, src):
"""
Args:
src (FloatTensor): a sequence of source words tensors
with optional feature tensors, of size (len x batch).
"""
self.src = src
self.previous_input = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input, self.src)
def update_state(self, input):
""" Called for every decoder forward pass. """
self.previous_input = input
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.src = Variable(self.src.data.repeat(1, beam_size, 1),
volatile=True)
| 11,553 | 36.391586 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Embeddings.py | import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.modules import BottleLinear, Elementwise
from onmt.Utils import aeq
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
pe = torch.arange(0, max_len).unsqueeze(1).expand(max_len, dim)
div_term = 1 / torch.pow(10000, torch.arange(0, dim * 2, 2) / dim)
pe = pe * div_term.expand_as(pe)
pe[:, 0::2] = torch.sin(pe[:, 0::2])
pe[:, 1::2] = torch.cos(pe[:, 1::2])
pe = pe.unsqueeze(1)
super(PositionalEncoding, self).__init__()
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
def forward(self, emb):
# We must wrap the self.pe in Variable to compute, not the other
# way - unwrap emb(i.e. emb.data). Otherwise the computation
# wouldn't be watched to build the compute graph.
emb = emb + Variable(self.pe[:emb.size(0), :1, :emb.size(2)]
.expand_as(emb), requires_grad=False)
emb = self.dropout(emb)
return emb
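# Worked example (illustrative, not part of the original source): column j of
# the buffer is scaled by 10000^(-2j/dim) before sin (even j) / cos (odd j)
# is applied, so with dim=4 and pos=1:
#   pe[1] ~= [sin(1), cos(0.01), sin(1e-4), cos(1e-6)]
#        ~= [0.8415, 1.0000, 0.0001, 1.0000]
# Each forward pass adds pe[:len] to the embeddings and applies dropout.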
class Embeddings(nn.Module):
"""
Words embeddings dictionary for encoder/decoder.
Args:
word_vec_size (int): size of the dictionary of embeddings.
        position_encoding (bool): use sinusoids to mark relative word positions.
feat_merge (string): merge action for the features embeddings:
concat, sum or mlp.
feat_vec_exponent (float): when using '-feat_merge concat', feature
                    embedding size is N^feat_vec_exponent, where N is the
                    number of values the feature takes.
feat_vec_size (int): embedding dimension for features when using
'-feat_merge mlp'
dropout (float): dropout probability.
word_padding_idx (int): padding index for words in the embeddings.
feats_padding_idx ([int]): padding index for a list of features
in the embeddings.
word_vocab_size (int): size of dictionary of embeddings for words.
feat_vocab_sizes ([int], optional): list of size of dictionary
of embeddings for each feature.
"""
def __init__(self, word_vec_size, position_encoding, feat_merge,
feat_vec_exponent, feat_vec_size, dropout,
word_padding_idx, feat_padding_idx,
word_vocab_size, feat_vocab_sizes=[]):
self.word_padding_idx = word_padding_idx
# Dimensions and padding for constructing the word embedding matrix
vocab_sizes = [word_vocab_size]
emb_dims = [word_vec_size]
pad_indices = [word_padding_idx]
# Dimensions and padding for feature embedding matrices
# (these have no effect if feat_vocab_sizes is empty)
if feat_merge == 'sum':
feat_dims = [word_vec_size] * len(feat_vocab_sizes)
elif feat_vec_size > 0:
feat_dims = [feat_vec_size] * len(feat_vocab_sizes)
else:
feat_dims = [int(vocab ** feat_vec_exponent)
for vocab in feat_vocab_sizes]
vocab_sizes.extend(feat_vocab_sizes)
emb_dims.extend(feat_dims)
pad_indices.extend(feat_padding_idx)
# The embedding matrix look-up tables. The first look-up table
# is for words. Subsequent ones are for features, if any exist.
emb_params = zip(vocab_sizes, emb_dims, pad_indices)
embeddings = [nn.Embedding(vocab, dim, padding_idx=pad)
for vocab, dim, pad in emb_params]
emb_luts = Elementwise(feat_merge, embeddings)
# The final output size of word + feature vectors. This can vary
# from the word vector size if and only if features are defined.
# This is the attribute you should access if you need to know
# how big your embeddings are going to be.
self.embedding_size = (sum(emb_dims) if feat_merge == 'concat'
else word_vec_size)
# The sequence of operations that converts the input sequence
# into a sequence of embeddings. At minimum this consists of
# looking up the embeddings for each word and feature in the
# input. Model parameters may require the sequence to contain
# additional operations as well.
super(Embeddings, self).__init__()
self.make_embedding = nn.Sequential()
self.make_embedding.add_module('emb_luts', emb_luts)
if feat_merge == 'mlp':
in_dim = sum(emb_dims)
out_dim = word_vec_size
mlp = nn.Sequential(BottleLinear(in_dim, out_dim), nn.ReLU())
self.make_embedding.add_module('mlp', mlp)
if position_encoding:
pe = PositionalEncoding(dropout, self.embedding_size)
self.make_embedding.add_module('pe', pe)
@property
def word_lut(self):
return self.make_embedding[0][0]
@property
def emb_luts(self):
return self.make_embedding[0]
def load_pretrained_vectors(self, emb_file, fixed):
if emb_file:
pretrained = torch.load(emb_file)
self.word_lut.weight.data.copy_(pretrained)
if fixed:
self.word_lut.weight.requires_grad = False
def forward(self, input):
"""
Return the embeddings for words, and features if there are any.
Args:
input (LongTensor): len x batch x nfeat
Return:
emb (FloatTensor): len x batch x self.embedding_size
"""
in_length, in_batch, nfeat = input.size()
aeq(nfeat, len(self.emb_luts))
emb = self.make_embedding(input)
out_length, out_batch, emb_size = emb.size()
aeq(in_length, out_length)
aeq(in_batch, out_batch)
aeq(emb_size, self.embedding_size)
return emb
| 5,928 | 39.609589 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/CopyGenerator.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
import onmt
from onmt.Utils import aeq
class CopyGenerator(nn.Module):
"""
Generator module that additionally considers copying
words directly from the source.
"""
def __init__(self, opt, src_dict, tgt_dict):
super(CopyGenerator, self).__init__()
self.linear = nn.Linear(opt.rnn_size, len(tgt_dict))
self.linear_copy = nn.Linear(opt.rnn_size, 1)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
def forward(self, hidden, attn, src_map):
"""
        Computes p(w) = p(z=1) p_{copy}(w|z=1) + p(z=0) * p_{softmax}(w|z=0)
"""
# CHECKS
batch_by_tlen, _ = hidden.size()
batch_by_tlen_, slen = attn.size()
slen_, batch, cvocab = src_map.size()
aeq(batch_by_tlen, batch_by_tlen_)
aeq(slen, slen_)
# Original probabilities.
logits = self.linear(hidden)
logits[:, self.tgt_dict.stoi[onmt.IO.PAD_WORD]] = -float('inf')
prob = F.softmax(logits)
        # Probability of copying, p(z=1), for each batch element.
        copy = F.sigmoid(self.linear_copy(hidden))
        # Probability of not copying: p_{word}(w) * (1 - p(z))
out_prob = torch.mul(prob, 1 - copy.expand_as(prob))
mul_attn = torch.mul(attn, copy.expand_as(attn))
copy_prob = torch.bmm(mul_attn.view(-1, batch, slen)
.transpose(0, 1),
src_map.transpose(0, 1)).transpose(0, 1)
copy_prob = copy_prob.contiguous().view(-1, cvocab)
return torch.cat([out_prob, copy_prob], 1)
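    # Numeric sketch (assumed values, not part of the original source): if
    # the copy gate p(z=1) is 0.3 at some step, a target-vocab word with
    # softmax prob 0.5 contributes 0.7 * 0.5 = 0.35 to the first
    # len(tgt_dict) columns, while a source position attended with weight
    # 0.4 contributes 0.3 * 0.4 = 0.12 to its extended-vocab column.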
class CopyGeneratorCriterion(object):
def __init__(self, vocab_size, force_copy, pad, eps=1e-20):
self.force_copy = force_copy
self.eps = eps
self.offset = vocab_size
self.pad = pad
def __call__(self, scores, align, target):
align = align.view(-1)
# Copy prob.
out = scores.gather(1, align.view(-1, 1) + self.offset) \
.view(-1).mul(align.ne(0).float())
tmp = scores.gather(1, target.view(-1, 1)).view(-1)
        # Regular vocab prob (non-unk targets, plus unk targets that cannot be copied)
if not self.force_copy:
out = out + self.eps + tmp.mul(target.ne(0).float()) + \
tmp.mul(align.eq(0).float()).mul(target.eq(0).float())
else:
# Forced copy.
out = out + self.eps + tmp.mul(align.eq(0).float())
# Drop padding.
loss = -out.log().mul(target.ne(self.pad).float()).sum()
return loss
class CopyGeneratorLossCompute(onmt.Loss.LossComputeBase):
"""
Copy Generator Loss Computation.
"""
def __init__(self, generator, tgt_vocab, dataset,
force_copy, eps=1e-20):
super(CopyGeneratorLossCompute, self).__init__(generator, tgt_vocab)
self.dataset = dataset
self.force_copy = force_copy
self.criterion = CopyGeneratorCriterion(len(tgt_vocab), force_copy,
self.padding_idx)
def make_shard_state(self, batch, output, range_, attns):
""" See base class for args description. """
if getattr(batch, "alignment", None) is None:
raise AssertionError("using -copy_attn you need to pass in "
"-dynamic_dict during preprocess stage.")
return {
"output": output,
"target": batch.tgt[range_[0] + 1: range_[1]],
"copy_attn": attns.get("copy"),
"align": batch.alignment[range_[0] + 1: range_[1]]
}
def compute_loss(self, batch, output, target, copy_attn, align):
"""
Compute the loss. The args must match self.make_shard_state().
Args:
batch: the current batch.
            output: the predicted output from the model.
            target: the reference target to compare the output with.
copy_attn: the copy attention value.
align: the align info.
"""
target = target.view(-1)
align = align.view(-1)
scores = self.generator(self.bottle(output),
self.bottle(copy_attn),
batch.src_map)
loss = self.criterion(scores, align, target)
scores_data = scores.data.clone()
scores_data = self.dataset.collapse_copy_scores(
self.unbottle(scores_data, batch.batch_size),
batch, self.tgt_vocab)
scores_data = self.bottle(scores_data)
        # The correct target is the copy index when copying is the only option.
# TODO: replace for loop with masking or boolean indexing
target_data = target.data.clone()
for i in range(target_data.size(0)):
if target_data[i] == 0 and align.data[i] != 0:
target_data[i] = align.data[i] + len(self.tgt_vocab)
# Coverage loss term.
loss_data = loss.data.clone()
stats = self.stats(loss_data, scores_data, target_data)
return loss, stats
| 5,090 | 34.852113 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/StackedRNN.py | import torch
import torch.nn as nn
class StackedLSTM(nn.Module):
"""
Our own implementation of stacked LSTM.
Needed for the decoder, because we do input feeding.
"""
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i, layer in enumerate(self.layers):
h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))
input = h_1_i
if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return input, (h_1, c_1)
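    # Illustrative note (not part of the original source): unlike nn.LSTM,
    # these stacked cells advance a single time step per call, which is what
    # input feeding requires -- the attentional output of step t must be
    # concatenated to the embedding of step t+1 before the next call.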
class StackedGRU(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedGRU, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.GRUCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, input, hidden):
h_1 = []
for i, layer in enumerate(self.layers):
h_1_i = layer(input, hidden[0][i])
input = h_1_i
if i + 1 != self.num_layers:
input = self.dropout(input)
h_1 += [h_1_i]
h_1 = torch.stack(h_1)
return input, (h_1,)
| 1,755 | 28.266667 | 66 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/MultiHeadedAttn.py | import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from onmt.Utils import aeq
from onmt.modules.UtilClass import BottleLinear, \
BottleLayerNorm, BottleSoftmax
class MultiHeadedAttention(nn.Module):
''' Multi-Head Attention module from
"Attention is All You Need".
'''
def __init__(self, head_count, model_dim, p=0.1):
"""
Args:
head_count(int): number of parallel heads.
model_dim(int): the dimension of keys/values/queries in this
MultiHeadedAttention, must be divisible by head_count.
"""
assert model_dim % head_count == 0
self.dim_per_head = model_dim // head_count
self.model_dim = model_dim
super(MultiHeadedAttention, self).__init__()
self.head_count = head_count
self.linear_keys = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.linear_values = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.linear_query = BottleLinear(model_dim,
head_count * self.dim_per_head,
bias=False)
self.sm = BottleSoftmax()
self.activation = nn.ReLU()
self.layer_norm = BottleLayerNorm(model_dim)
self.dropout = nn.Dropout(p)
self.res_dropout = nn.Dropout(p)
def forward(self, key, value, query, mask=None):
# CHECKS
batch, k_len, d = key.size()
batch_, k_len_, d_ = value.size()
aeq(batch, batch_)
aeq(k_len, k_len_)
aeq(d, d_)
batch_, q_len, d_ = query.size()
aeq(batch, batch_)
aeq(d, d_)
        aeq(self.model_dim % self.head_count, 0)
if mask is not None:
batch_, q_len_, k_len_ = mask.size()
aeq(batch_, batch)
aeq(k_len_, k_len)
            aeq(q_len_, q_len)
# END CHECKS
def shape_projection(x):
b, l, d = x.size()
return x.view(b, l, self.head_count, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b * self.head_count, l, self.dim_per_head)
def unshape_projection(x, q):
b, l, d = q.size()
return x.view(b, self.head_count, l, self.dim_per_head) \
.transpose(1, 2).contiguous() \
.view(b, l, self.head_count * self.dim_per_head)
residual = query
key_up = shape_projection(self.linear_keys(key))
value_up = shape_projection(self.linear_values(value))
query_up = shape_projection(self.linear_query(query))
scaled = torch.bmm(query_up, key_up.transpose(1, 2))
scaled = scaled / math.sqrt(self.dim_per_head)
        # Note: the last dim of `scaled` is the key length (misleadingly
        # named dim_per_head in the original).
        bh, l, key_len = scaled.size()
        b = bh // self.head_count
        if mask is not None:
            scaled = scaled.view(b, self.head_count, l, key_len)
            mask = mask.unsqueeze(1).expand_as(scaled)
            scaled = scaled.masked_fill(Variable(mask), -float('inf')) \
                           .view(bh, l, key_len)
        attn = self.sm(scaled)
        # Return the attention distribution of the first head only.
        top_attn = attn \
            .view(b, self.head_count, l, key_len)[:, 0, :, :] \
            .contiguous()
        # Reuse `attn` rather than recomputing the softmax.
        drop_attn = self.dropout(attn)
        # values : (batch * head_count) x qlen x dim_per_head
out = unshape_projection(torch.bmm(drop_attn, value_up), residual)
# Residual and layer norm
res = self.res_dropout(out) + residual
ret = self.layer_norm(res)
# CHECK
batch_, q_len_, d_ = ret.size()
aeq(q_len, q_len_)
aeq(batch, batch_)
aeq(d, d_)
# END CHECK
return ret, top_attn
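# Shape walkthrough (illustrative, not part of the original source): with
# batch=2, len=5, model_dim=512 and head_count=8, each projection yields
# (2, 5, 512) -> (16, 5, 64) via shape_projection; the scaled dot product
# gives (16, 5, 5) attention weights, and unshape_projection folds the heads
# back to (2, 5, 512) before the residual connection and layer norm.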
| 3,966 | 34.738739 | 74 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Gate.py | """
Context gate is a decoder module that takes as input the previous word
embedding, the current decoder state and the attention state, and produces a
gate.
The gate can be used to select the input from the target side context
(decoder state), from the source context (attention state) or both.
"""
import torch
import torch.nn as nn
def ContextGateFactory(type, embeddings_size, decoder_size,
attention_size, output_size):
"""Returns the correct ContextGate class"""
gate_types = {'source': SourceContextGate,
'target': TargetContextGate,
'both': BothContextGate}
assert type in gate_types, "Not valid ContextGate type: {0}".format(type)
return gate_types[type](embeddings_size, decoder_size, attention_size,
output_size)
class ContextGate(nn.Module):
"""Implement up to the computation of the gate"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(ContextGate, self).__init__()
input_size = embeddings_size + decoder_size + attention_size
self.gate = nn.Linear(input_size, output_size, bias=True)
self.sig = nn.Sigmoid()
self.source_proj = nn.Linear(attention_size, output_size)
self.target_proj = nn.Linear(embeddings_size + decoder_size,
output_size)
def forward(self, prev_emb, dec_state, attn_state):
input_tensor = torch.cat((prev_emb, dec_state, attn_state), dim=1)
z = self.sig(self.gate(input_tensor))
proj_source = self.source_proj(attn_state)
proj_target = self.target_proj(
torch.cat((prev_emb, dec_state), dim=1))
return z, proj_source, proj_target
class SourceContextGate(nn.Module):
"""Apply the context gate only to the source context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(SourceContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(
prev_emb, dec_state, attn_state)
return self.tanh(target + z * source)
class TargetContextGate(nn.Module):
"""Apply the context gate only to the target context"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(TargetContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh(z * target + source)
class BothContextGate(nn.Module):
"""Apply the context gate to both contexts"""
def __init__(self, embeddings_size, decoder_size,
attention_size, output_size):
super(BothContextGate, self).__init__()
self.context_gate = ContextGate(embeddings_size, decoder_size,
attention_size, output_size)
self.tanh = nn.Tanh()
def forward(self, prev_emb, dec_state, attn_state):
z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
return self.tanh((1. - z) * target + z * source)
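if __name__ == "__main__":
    # Illustrative smoke test (not in the original file): build a gate via
    # the factory and run it on random inputs of assumed toy sizes.
    from torch.autograd import Variable
    gate = ContextGateFactory('both', embeddings_size=8, decoder_size=16,
                              attention_size=16, output_size=16)
    prev_emb = Variable(torch.randn(4, 8))
    dec_state = Variable(torch.randn(4, 16))
    attn_state = Variable(torch.randn(4, 16))
    print(gate(prev_emb, dec_state, attn_state).size())  # (4, 16)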
| 3,596 | 38.527473 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/UtilClass.py | import torch
import torch.nn as nn
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0]*size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class Bottle2(nn.Module):
def forward(self, input):
if len(input.size()) <= 3:
return super(Bottle2, self).forward(input)
size = input.size()
out = super(Bottle2, self).forward(input.view(size[0]*size[1],
size[2], size[3]))
return out.contiguous().view(size[0], size[1], size[2], size[3])
class LayerNorm(nn.Module):
''' Layer normalization module '''
def __init__(self, d_hid, eps=1e-3):
super(LayerNorm, self).__init__()
self.eps = eps
self.a_2 = nn.Parameter(torch.ones(d_hid), requires_grad=True)
self.b_2 = nn.Parameter(torch.zeros(d_hid), requires_grad=True)
def forward(self, z):
if z.size(1) == 1:
return z
mu = torch.mean(z, dim=1)
sigma = torch.std(z, dim=1)
# HACK. PyTorch is changing behavior
if mu.dim() == 1:
mu = mu.unsqueeze(1)
sigma = sigma.unsqueeze(1)
ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
ln_out = ln_out.mul(self.a_2.expand_as(ln_out)) \
+ self.b_2.expand_as(ln_out)
return ln_out
class BottleLinear(Bottle, nn.Linear):
pass
class BottleLayerNorm(Bottle, LayerNorm):
pass
class BottleSoftmax(Bottle, nn.Softmax):
pass
class Elementwise(nn.ModuleList):
"""
A simple network container.
Parameters are a list of modules.
Inputs are a 3d Variable whose last dimension is the same length
as the list.
Outputs are the result of applying modules to inputs elementwise.
An optional merge parameter allows the outputs to be reduced to a
single Variable.
"""
def __init__(self, merge=None, *args):
assert merge in [None, 'first', 'concat', 'sum', 'mlp']
self.merge = merge
super(Elementwise, self).__init__(*args)
def forward(self, input):
inputs = [feat.squeeze(2) for feat in input.split(1, dim=2)]
assert len(self) == len(inputs)
outputs = [f(x) for f, x in zip(self, inputs)]
if self.merge == 'first':
return outputs[0]
elif self.merge == 'concat' or self.merge == 'mlp':
return torch.cat(outputs, 2)
elif self.merge == 'sum':
return sum(outputs)
else:
return outputs
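if __name__ == "__main__":
    # Illustrative example (not in the original file): three embedding
    # tables applied elementwise over the last dimension of a 3d input.
    from torch.autograd import Variable
    embs = Elementwise('concat', [nn.Embedding(10, 4) for _ in range(3)])
    inp = Variable(torch.LongTensor(5, 2, 3).random_(0, 10))
    print(embs(inp).size())  # (5, 2, 12)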
| 2,769 | 30.123596 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/StructuredAttention.py | import torch.nn as nn
import torch
import torch.cuda
from torch.autograd import Variable
class MatrixTree(nn.Module):
"""Implementation of the matrix-tree theorem for computing marginals
of non-projective dependency parsing. This attention layer is used
in the paper "Learning Structured Text Representations."
"""
def __init__(self, eps=1e-5):
self.eps = eps
super(MatrixTree, self).__init__()
def forward(self, input):
laplacian = input.exp() + self.eps
output = input.clone()
for b in range(input.size(0)):
lap = laplacian[b].masked_fill(
Variable(torch.eye(input.size(1)).cuda().ne(0)), 0)
lap = -lap + torch.diag(lap.sum(0))
# store roots on diagonal
lap[0] = input[b].diag().exp()
inv_laplacian = lap.inverse()
factor = inv_laplacian.diag().unsqueeze(1)\
.expand_as(input[b]).transpose(0, 1)
term1 = input[b].exp().mul(factor).clone()
term2 = input[b].exp().mul(inv_laplacian.transpose(0, 1)).clone()
term1[:, 0] = 0
term2[0] = 0
output[b] = term1 - term2
roots_output = input[b].diag().exp().mul(
inv_laplacian.transpose(0, 1)[0])
output[b] = output[b] + torch.diag(roots_output)
return output
if __name__ == "__main__":
dtree = MatrixTree()
q = torch.rand(1, 5, 5).cuda()
marg = dtree.forward(Variable(q))
print(marg.sum(1))
| 1,556 | 33.6 | 77 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/Conv2Conv.py | """
Implementation of "Convolutional Sequence to Sequence Learning"
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
import onmt.modules
from onmt.modules.WeightNorm import WeightNormConv2d
from onmt.Models import EncoderBase
from onmt.Models import DecoderState
from onmt.Utils import aeq
SCALE_WEIGHT = 0.5 ** 0.5
def shape_transform(x):
""" Tranform the size of the tensors to fit for conv input. """
return torch.unsqueeze(torch.transpose(x, 1, 2), 3)
class GatedConv(nn.Module):
def __init__(self, input_size, width=3, dropout=0.2, nopad=False):
super(GatedConv, self).__init__()
self.conv = WeightNormConv2d(input_size, 2 * input_size,
kernel_size=(width, 1), stride=(1, 1),
padding=(width // 2 * (1 - nopad), 0))
init.xavier_uniform(self.conv.weight, gain=(4 * (1 - dropout))**0.5)
self.dropout = nn.Dropout(dropout)
def forward(self, x_var, hidden=None):
x_var = self.dropout(x_var)
x_var = self.conv(x_var)
out, gate = x_var.split(int(x_var.size(1) / 2), 1)
out = out * F.sigmoid(gate)
return out
class StackedCNN(nn.Module):
def __init__(self, num_layers, input_size, cnn_kernel_width=3,
dropout=0.2):
super(StackedCNN, self).__init__()
self.dropout = dropout
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(
GatedConv(input_size, cnn_kernel_width, dropout))
def forward(self, x, hidden=None):
for conv in self.layers:
x = x + conv(x)
x *= SCALE_WEIGHT
return x
class CNNEncoder(EncoderBase):
"""
Encoder built on CNN.
"""
def __init__(self, num_layers, hidden_size,
cnn_kernel_width, dropout, embeddings):
super(CNNEncoder, self).__init__()
self.embeddings = embeddings
input_size = embeddings.embedding_size
self.linear = nn.Linear(input_size, hidden_size)
self.cnn = StackedCNN(num_layers, hidden_size,
cnn_kernel_width, dropout)
def forward(self, input, lengths=None, hidden=None):
""" See EncoderBase.forward() for description of args and returns."""
self._check_args(input, lengths, hidden)
emb = self.embeddings(input)
s_len, batch, emb_dim = emb.size()
emb = emb.transpose(0, 1).contiguous()
emb_reshape = emb.view(emb.size(0) * emb.size(1), -1)
emb_remap = self.linear(emb_reshape)
emb_remap = emb_remap.view(emb.size(0), emb.size(1), -1)
emb_remap = shape_transform(emb_remap)
out = self.cnn(emb_remap)
return emb_remap.squeeze(3).transpose(0, 1).contiguous(),\
out.squeeze(3).transpose(0, 1).contiguous()
class CNNDecoder(nn.Module):
"""
    Decoder built on CNN, which consists of residual convolutional layers,
with ConvMultiStepAttention.
"""
def __init__(self, num_layers, hidden_size, attn_type,
copy_attn, cnn_kernel_width, dropout, embeddings):
super(CNNDecoder, self).__init__()
# Basic attributes.
self.decoder_type = 'cnn'
self.num_layers = num_layers
self.hidden_size = hidden_size
self.cnn_kernel_width = cnn_kernel_width
self.embeddings = embeddings
self.dropout = dropout
# Build the CNN.
input_size = self.embeddings.embedding_size
self.linear = nn.Linear(input_size, self.hidden_size)
self.conv_layers = nn.ModuleList()
for i in range(self.num_layers):
self.conv_layers.append(
GatedConv(self.hidden_size, self.cnn_kernel_width,
self.dropout, True))
self.attn_layers = nn.ModuleList()
for i in range(self.num_layers):
self.attn_layers.append(
onmt.modules.ConvMultiStepAttention(self.hidden_size))
# CNNDecoder has its own attention mechanism.
# Set up a separated copy attention layer, if needed.
self._copy = False
if copy_attn:
self.copy_attn = onmt.modules.GlobalAttention(
hidden_size, attn_type=attn_type)
self._copy = True
def forward(self, input, context, state):
"""
Forward through the CNNDecoder.
Args:
input (LongTensor): a sequence of input tokens tensors
of size (len x batch x nfeats).
context (FloatTensor): output(tensor sequence) from the encoder
CNN of size (src_len x batch x hidden_size).
state (FloatTensor): hidden state from the encoder CNN for
initializing the decoder.
Returns:
outputs (FloatTensor): a Tensor sequence of output from the decoder
of shape (len x batch x hidden_size).
state (FloatTensor): final hidden state from the decoder.
attns (dict of (str, FloatTensor)): a dictionary of different
type of attention Tensor from the decoder
of shape (src_len x batch).
"""
# CHECKS
assert isinstance(state, CNNDecoderState)
input_len, input_batch, _ = input.size()
contxt_len, contxt_batch, _ = context.size()
aeq(input_batch, contxt_batch)
# END CHECKS
if state.previous_input is not None:
input = torch.cat([state.previous_input, input], 0)
# Initialize return variables.
outputs = []
attns = {"std": []}
assert not self._copy, "Copy mechanism not yet tested in conv2conv"
if self._copy:
attns["copy"] = []
emb = self.embeddings(input)
assert emb.dim() == 3 # len x batch x embedding_dim
tgt_emb = emb.transpose(0, 1).contiguous()
# The output of CNNEncoder.
src_context_t = context.transpose(0, 1).contiguous()
# The combination of output of CNNEncoder and source embeddings.
src_context_c = state.init_src.transpose(0, 1).contiguous()
# Run the forward pass of the CNNDecoder.
emb_reshape = tgt_emb.contiguous().view(
tgt_emb.size(0) * tgt_emb.size(1), -1)
linear_out = self.linear(emb_reshape)
x = linear_out.view(tgt_emb.size(0), tgt_emb.size(1), -1)
x = shape_transform(x)
pad = Variable(torch.zeros(x.size(0), x.size(1),
self.cnn_kernel_width - 1, 1))
pad = pad.type_as(x)
base_target_emb = x
for conv, attention in zip(self.conv_layers, self.attn_layers):
new_target_input = torch.cat([pad, x], 2)
out = conv(new_target_input)
c, attn = attention(base_target_emb, out,
src_context_t, src_context_c)
x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
output = x.squeeze(3).transpose(1, 2)
# Process the result and update the attentions.
outputs = output.transpose(0, 1).contiguous()
if state.previous_input is not None:
outputs = outputs[state.previous_input.size(0):]
attn = attn[:, state.previous_input.size(0):].squeeze()
attn = torch.stack([attn])
attns["std"] = attn
if self._copy:
attns["copy"] = attn
# Update the state.
state.update_state(input)
return outputs, state, attns
def init_decoder_state(self, src, context, enc_hidden):
return CNNDecoderState(context, enc_hidden)
class CNNDecoderState(DecoderState):
def __init__(self, context, enc_hidden):
self.init_src = (context + enc_hidden) * SCALE_WEIGHT
self.previous_input = None
@property
def _all(self):
"""
Contains attributes that need to be updated in self.beam_update().
"""
return (self.previous_input,)
def update_state(self, input):
""" Called for every decoder forward pass. """
self.previous_input = input
def repeat_beam_size_times(self, beam_size):
""" Repeat beam_size times along batch dimension. """
self.init_src = Variable(
self.init_src.data.repeat(1, beam_size, 1), volatile=True)
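if __name__ == "__main__":
    # Illustrative smoke test (not in the original file) for the gated
    # conv stack; the input uses the (batch, channels, length, 1) layout
    # produced by shape_transform().
    x = Variable(torch.randn(4, 10, 32))        # batch x len x hidden
    stack = StackedCNN(num_layers=2, input_size=32)
    print(stack(shape_transform(x)).size())     # (4, 32, 10, 1)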
| 8,557 | 35.57265 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/GlobalAttention.py | import torch
import torch.nn as nn
from onmt.modules.UtilClass import BottleLinear
from onmt.Utils import aeq
class GlobalAttention(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\ | | /
.....
\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
Luong Attention (dot, general):
The full function is
$$\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
* dot: $$score(h_t,{\overline{h}}_s) = h_t^T{\overline{h}}_s$$
* general: $$score(h_t,{\overline{h}}_s) = h_t^T W_a {\overline{h}}_s$$
Bahdanau Attention (mlp):
$$c = \sum_{j=1}^{SeqLength}\a_jh_j$$.
The Alignment-function $$a$$ computes an alignment as:
$$a_j = softmax(v_a^T \tanh(W_a q + U_a h_j) )$$.
"""
def __init__(self, dim, coverage=False, attn_type="dot"):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert (self.attn_type in ["dot", "general", "mlp"]), (
"Please select a valid attention type.")
if self.attn_type == "general":
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == "mlp":
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
# mlp wants it with bias
out_bias = self.attn_type == "mlp"
self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)
self.sm = nn.Softmax()
self.tanh = nn.Tanh()
self.mask = None
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def applyMask(self, mask):
self.mask = mask
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
# Check input sizes
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ["general", "dot"]:
if self.attn_type == "general":
h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
# (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
# (batch, t_len, s_len, d)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context, coverage=None):
"""
input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states
coverage (FloatTensor): None (not supported yet)
"""
# one step input
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, sourceL_ = coverage.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
if self.mask is not None:
beam_, batch_, sourceL_ = self.mask.size()
aeq(batch, batch_*beam_)
aeq(sourceL, sourceL_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
context += self.linear_cover(cover).view_as(context)
context = self.tanh(context)
# compute attention scores, as in Luong et al.
align = self.score(input, context)
if self.mask is not None:
            mask_ = self.mask.view(batch, 1, sourceL)  # make it broadcastable
align.data.masked_fill_(mask_, -float('inf'))
# Softmax to normalize attention weights
align_vectors = self.sm(align.view(batch*targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
# each context vector c_t is the weighted average
# over all the source hidden states
c = torch.bmm(align_vectors, context)
# concatenate
concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)
attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
if self.attn_type in ["general", "dot"]:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
# Check output sizes
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
# Check output sizes
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(dim, dim_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
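if __name__ == "__main__":
    # Illustrative smoke test (not in the original file): a one-step query
    # attending over a random source context of assumed toy sizes.
    from torch.autograd import Variable
    attn = GlobalAttention(dim=16, attn_type="general")
    query = Variable(torch.randn(4, 16))        # batch x dim
    context = Variable(torch.randn(4, 7, 16))   # batch x src_len x dim
    attn_h, align = attn(query, context)
    print(attn_h.size(), align.size())  # (4, 16) and (4, 7)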
| 6,419 | 32.968254 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/SRU.py | """
Implementation of "Training RNNs as Fast as CNNs".
TODO: turn to pytorch's implementation when it is available.
This implementation is adpoted from the author of the paper:
https://github.com/taolei87/sru/blob/master/cuda_functional.py.
"""
import subprocess
import platform
import os
import re
import argparse
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from collections import namedtuple
# For command-line option parsing
class CheckSRU(argparse.Action):
def __init__(self, option_strings, dest, **kwargs):
super(CheckSRU, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values == 'SRU':
check_sru_requirement(abort=True)
# Check pass, set the args.
setattr(namespace, self.dest, values)
# This SRU version implements its own cuda-level optimization,
# so it requires that:
# 1. the `cupy` and `pynvrtc` python packages are installed.
# 2. pytorch is built with cuda support.
# 3. library path set: export LD_LIBRARY_PATH=<cuda lib path>.
def check_sru_requirement(abort=False):
"""
    Return True if the check passes; if the check fails and abort is True,
    raise an Exception, otherwise return False.
"""
# Check 1.
try:
if platform.system() == 'Windows':
subprocess.check_output('pip freeze | findstr cupy', shell=True)
subprocess.check_output('pip freeze | findstr pynvrtc',
shell=True)
else: # Unix-like systems
subprocess.check_output('pip freeze | grep -w cupy', shell=True)
subprocess.check_output('pip freeze | grep -w pynvrtc',
shell=True)
except subprocess.CalledProcessError:
if not abort:
return False
raise AssertionError("Using SRU requires 'cupy' and 'pynvrtc' "
"python packages installed.")
# Check 2.
if torch.cuda.is_available() is False:
if not abort:
return False
raise AssertionError("Using SRU requires pytorch built with cuda.")
# Check 3.
pattern = re.compile(".*cuda/lib.*")
ld_path = os.getenv('LD_LIBRARY_PATH', "")
if re.match(pattern, ld_path) is None:
if not abort:
return False
raise AssertionError("Using SRU requires setting cuda lib path, e.g. "
"export LD_LIBRARY_PATH=/usr/local/cuda/lib64.")
return True
SRU_CODE = """
extern "C" {
__forceinline__ __device__ float sigmoidf(float x)
{
return 1.f / (1.f + expf(-x));
}
__forceinline__ __device__ float reluf(float x)
{
return (x > 0.f) ? x : 0.f;
}
__global__ void sru_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
for (int row = 0; row < len; ++row)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u;
xp += ncols_x;
cp += ncols;
hp += ncols;
}
}
__global__ void sru_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len,
const int batch, const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
int ncols = batch*d;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float bias1 = *(bias + (col%d));
const float bias2 = *(bias + (col%d) + d);
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const float *up = u + (col*k) + (len-1)*ncols_u;
const float *xp = (k == 3) ? (x + col + (len-1)*ncols) : (up + 3);
const float *cp = c + col + (len-1)*ncols;
const float *ghp = grad_h + col + (len-1)*ncols;
float *gup = grad_u + (col*k) + (len-1)*ncols_u;
float *gxp = (k == 3) ? (grad_x + col + (len-1)*ncols) : (gup + 3);
for (int row = len-1; row >= 0; --row)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (row>0) ? (*(cp-ncols)) : (*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u;
xp -= ncols_x;
cp -= ncols;
gup -= ncols_u;
gxp -= ncols_x;
ghp -= ncols;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
__global__ void sru_bi_fwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const int len, const int batch,
const int d, const int k,
float * __restrict__ h,
float * __restrict__ c,
const int activation_type)
{
assert ((k == 3) || (x == NULL));
assert ((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float cur = *(init + col);
const int d2 = d*2;
const bool flip = (col%d2) >= d;
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
float *cp = c + col;
float *hp = h + col;
if (flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
hp += (len-1)*ncols;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
float g1 = sigmoidf((*(up+1))+bias1);
float g2 = sigmoidf((*(up+2))+bias2);
cur = (cur-(*up))*g1 + (*up);
*cp = cur;
float val = (activation_type == 1) ? tanh(cur) : (
(activation_type == 2) ? reluf(cur) : cur
);
*hp = (val*mask-(*xp))*g2 + (*xp);
up += ncols_u_;
xp += ncols_x_;
cp += ncols_;
hp += ncols_;
}
}
__global__ void sru_bi_bwd(const float * __restrict__ u,
const float * __restrict__ x,
const float * __restrict__ bias,
const float * __restrict__ init,
const float * __restrict__ mask_h,
const float * __restrict__ c,
const float * __restrict__ grad_h,
const float * __restrict__ grad_last,
const int len, const int batch,
const int d, const int k,
float * __restrict__ grad_u,
float * __restrict__ grad_x,
float * __restrict__ grad_bias,
float * __restrict__ grad_init,
int activation_type)
{
assert((k == 3) || (x == NULL));
assert((k == 3) || (grad_x == NULL));
assert((k == 3) || (k == 4));
int ncols = batch*d*2;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col >= ncols) return;
int ncols_u = ncols*k;
int ncols_x = (k == 3) ? ncols : ncols_u;
const float mask = (mask_h == NULL) ? 1.0 : (*(mask_h + col));
float gbias1 = 0;
float gbias2 = 0;
float cur = *(grad_last + col);
const int d2 = d*2;
const bool flip = ((col%d2) >= d);
const float bias1 = *(bias + (col%d2));
const float bias2 = *(bias + (col%d2) + d2);
const float *up = u + (col*k);
const float *xp = (k == 3) ? (x + col) : (up + 3);
const float *cp = c + col;
const float *ghp = grad_h + col;
float *gup = grad_u + (col*k);
float *gxp = (k == 3) ? (grad_x + col) : (gup + 3);
if (!flip) {
up += (len-1)*ncols_u;
xp += (len-1)*ncols_x;
cp += (len-1)*ncols;
ghp += (len-1)*ncols;
gup += (len-1)*ncols_u;
gxp += (len-1)*ncols_x;
}
int ncols_u_ = flip ? -ncols_u : ncols_u;
int ncols_x_ = flip ? -ncols_x : ncols_x;
int ncols_ = flip ? -ncols : ncols;
for (int cnt = 0; cnt < len; ++cnt)
{
const float g1 = sigmoidf((*(up+1))+bias1);
const float g2 = sigmoidf((*(up+2))+bias2);
const float c_val = (activation_type == 1) ? tanh(*cp) : (
(activation_type == 2) ? reluf(*cp) : (*cp)
);
const float x_val = *xp;
const float u_val = *up;
const float prev_c_val = (cnt<len-1)?(*(cp-ncols_)):(*(init+col));
const float gh_val = *ghp;
// h = c*g2 + x*(1-g2) = (c-x)*g2 + x
// c = c'*g1 + g0*(1-g1) = (c'-g0)*g1 + g0
// grad wrt x
*gxp = gh_val*(1-g2);
// grad wrt g2, u2 and bias2
float gg2 = gh_val*(c_val*mask-x_val)*(g2*(1-g2));
*(gup+2) = gg2;
gbias2 += gg2;
// grad wrt c
const float tmp = (activation_type == 1) ? (g2*(1-c_val*c_val)) : (
((activation_type == 0) || (c_val > 0)) ? g2 : 0.f
);
const float gc = gh_val*mask*tmp + cur;
// grad wrt u0
*gup = gc*(1-g1);
// grad wrt g1, u1, and bias1
float gg1 = gc*(prev_c_val-u_val)*(g1*(1-g1));
*(gup+1) = gg1;
gbias1 += gg1;
// grad wrt c'
cur = gc*g1;
up -= ncols_u_;
xp -= ncols_x_;
cp -= ncols_;
gup -= ncols_u_;
gxp -= ncols_x_;
ghp -= ncols_;
}
*(grad_bias + col) = gbias1;
*(grad_bias + col + ncols) = gbias2;
*(grad_init +col) = cur;
}
}
"""
if check_sru_requirement():
from cupy.cuda import function
from pynvrtc.compiler import Program
    # This cuda() call is important: it sets up the device to use.
tmp_ = torch.rand(1, 1).cuda()
sru_prog = Program(SRU_CODE.encode('utf-8'),
'sru_prog.cu'.encode('utf-8'))
sru_ptx = sru_prog.compile()
sru_mod = function.Module()
sru_mod.load(bytes(sru_ptx.encode()))
SRU_FWD_FUNC = sru_mod.get_function('sru_fwd')
SRU_BWD_FUNC = sru_mod.get_function('sru_bwd')
SRU_BiFWD_FUNC = sru_mod.get_function('sru_bi_fwd')
SRU_BiBWD_FUNC = sru_mod.get_function('sru_bi_bwd')
stream = namedtuple('Stream', ['ptr'])
SRU_STREAM = stream(ptr=torch.cuda.current_stream().cuda_stream)
class SRU_Compute(Function):
def __init__(self, activation_type, d_out, bidirectional=False):
super(SRU_Compute, self).__init__()
self.activation_type = activation_type
self.d_out = d_out
self.bidirectional = bidirectional
def forward(self, u, x, bias, init=None, mask_h=None):
bidir = 2 if self.bidirectional else 1
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k // 2 if self.bidirectional else k
ncols = batch * d * bidir
thread_per_block = min(512, ncols)
num_block = (ncols-1) // thread_per_block+1
init_ = x.new(ncols).zero_() if init is None else init
size = (length, batch, d*bidir) if x.dim() == 3 else (batch, d*bidir)
c = x.new(*size)
h = x.new(*size)
FUNC = SRU_FWD_FUNC if not self.bidirectional else SRU_BiFWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
length,
batch,
d,
k_,
h.data_ptr(),
c.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
self.save_for_backward(u, x, bias, init, mask_h)
self.intermediate = c
if x.dim() == 2:
last_hidden = c
elif self.bidirectional:
# -> directions x batch x dim
last_hidden = torch.stack((c[-1, :, :d], c[0, :, d:]))
else:
last_hidden = c[-1]
return h, last_hidden
def backward(self, grad_h, grad_last):
if self.bidirectional:
grad_last = torch.cat((grad_last[0], grad_last[1]), 1)
bidir = 2 if self.bidirectional else 1
u, x, bias, init, mask_h = self.saved_tensors
c = self.intermediate
length = x.size(0) if x.dim() == 3 else 1
batch = x.size(-2)
d = self.d_out
k = u.size(-1) // d
k_ = k//2 if self.bidirectional else k
ncols = batch*d*bidir
thread_per_block = min(512, ncols)
num_block = (ncols-1) // thread_per_block+1
init_ = x.new(ncols).zero_() if init is None else init
grad_u = u.new(*u.size())
grad_bias = x.new(2, batch, d*bidir)
grad_init = x.new(batch, d*bidir)
# For DEBUG
# size = (length, batch, x.size(-1)) \
# if x.dim() == 3 else (batch, x.size(-1))
# grad_x = x.new(*x.size()) if k_ == 3 else x.new(*size).zero_()
# Normal use
grad_x = x.new(*x.size()) if k_ == 3 else None
FUNC = SRU_BWD_FUNC if not self.bidirectional else SRU_BiBWD_FUNC
FUNC(args=[
u.contiguous().data_ptr(),
x.contiguous().data_ptr() if k_ == 3 else 0,
bias.data_ptr(),
init_.contiguous().data_ptr(),
mask_h.data_ptr() if mask_h is not None else 0,
c.data_ptr(),
grad_h.contiguous().data_ptr(),
grad_last.contiguous().data_ptr(),
length,
batch,
d,
k_,
grad_u.data_ptr(),
grad_x.data_ptr() if k_ == 3 else 0,
grad_bias.data_ptr(),
grad_init.data_ptr(),
self.activation_type],
block=(thread_per_block, 1, 1), grid=(num_block, 1, 1),
stream=SRU_STREAM
)
return grad_u, grad_x, grad_bias.sum(1).view(-1), grad_init, None
class SRUCell(nn.Module):
def __init__(self, n_in, n_out, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
super(SRUCell, self).__init__()
self.n_in = n_in
self.n_out = n_out
self.rnn_dropout = rnn_dropout
self.dropout = dropout
self.bidirectional = bidirectional
self.activation_type = 2 if use_relu else (1 if use_tanh else 0)
out_size = n_out*2 if bidirectional else n_out
k = 4 if n_in != out_size else 3
self.size_per_dir = n_out*k
self.weight = nn.Parameter(torch.Tensor(
n_in,
self.size_per_dir*2 if bidirectional else self.size_per_dir
))
self.bias = nn.Parameter(torch.Tensor(
n_out*4 if bidirectional else n_out*2
))
self.init_weight()
def init_weight(self):
val_range = (3.0/self.n_in)**0.5
self.weight.data.uniform_(-val_range, val_range)
self.bias.data.zero_()
def set_bias(self, bias_val=0):
n_out = self.n_out
if self.bidirectional:
self.bias.data[n_out*2:].zero_().add_(bias_val)
else:
self.bias.data[n_out:].zero_().add_(bias_val)
def forward(self, input, c0=None):
assert input.dim() == 2 or input.dim() == 3
n_in, n_out = self.n_in, self.n_out
batch = input.size(-2)
if c0 is None:
c0 = Variable(input.data.new(
batch, n_out if not self.bidirectional else n_out*2
).zero_())
if self.training and (self.rnn_dropout > 0):
mask = self.get_dropout_mask_((batch, n_in), self.rnn_dropout)
x = input * mask.expand_as(input)
else:
x = input
x_2d = x if x.dim() == 2 else x.contiguous().view(-1, n_in)
u = x_2d.mm(self.weight)
if self.training and (self.dropout > 0):
bidir = 2 if self.bidirectional else 1
mask_h = self.get_dropout_mask_((batch, n_out*bidir), self.dropout)
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0, mask_h
)
else:
h, c = SRU_Compute(self.activation_type, n_out,
self.bidirectional)(
u, input, self.bias, c0
)
return h, c
def get_dropout_mask_(self, size, p):
w = self.weight.data
return Variable(w.new(*size).bernoulli_(1-p).div_(1-p))
class SRU(nn.Module):
def __init__(self, input_size, hidden_size,
num_layers=2, dropout=0, rnn_dropout=0,
bidirectional=False, use_tanh=1, use_relu=0):
        # An entry check here catches unsatisfied requirements on both
        # the train side and the translate side.
check_sru_requirement(abort=True)
super(SRU, self).__init__()
self.n_in = input_size
self.n_out = hidden_size
self.depth = num_layers
self.dropout = dropout
self.rnn_dropout = rnn_dropout
self.rnn_lst = nn.ModuleList()
self.bidirectional = bidirectional
self.out_size = hidden_size*2 if bidirectional else hidden_size
for i in range(num_layers):
sru_cell = SRUCell(
n_in=self.n_in if i == 0 else self.out_size,
n_out=self.n_out,
dropout=dropout if i+1 != num_layers else 0,
rnn_dropout=rnn_dropout,
bidirectional=bidirectional,
use_tanh=use_tanh,
use_relu=use_relu,
)
self.rnn_lst.append(sru_cell)
def set_bias(self, bias_val=0):
for l in self.rnn_lst:
l.set_bias(bias_val)
def forward(self, input, c0=None, return_hidden=True):
assert input.dim() == 3 # (len, batch, n_in)
dir_ = 2 if self.bidirectional else 1
if c0 is None:
zeros = Variable(input.data.new(
input.size(1), self.n_out*dir_
).zero_())
c0 = [zeros for i in range(self.depth)]
else:
if isinstance(c0, tuple):
# RNNDecoderState wraps hidden as a tuple.
c0 = c0[0]
assert c0.dim() == 3 # (depth, batch, dir_*n_out)
c0 = [h.squeeze(0) for h in c0.chunk(self.depth, 0)]
prevx = input
lstc = []
for i, rnn in enumerate(self.rnn_lst):
h, c = rnn(prevx, c0[i])
prevx = h
lstc.append(c)
if self.bidirectional:
# fh -> (layers*directions) x batch x dim
fh = torch.cat(lstc)
else:
fh = torch.stack(lstc)
if return_hidden:
return prevx, fh
else:
return prevx
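# Illustrative usage sketch (not in the original file). SRU requires a CUDA
# build of pytorch plus the `cupy` and `pynvrtc` packages (see
# check_sru_requirement above), so this is shown as a sketch rather than a
# runnable smoke test:
#
#     rnn = SRU(input_size=128, hidden_size=256, num_layers=2,
#               bidirectional=True).cuda()
#     x = Variable(torch.randn(35, 16, 128).cuda())   # (len, batch, n_in)
#     output, hidden = rnn(x)
#     # output: (35, 16, 512); hidden: (layers*directions, 16, 256)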
| 23,318 | 36.672052 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/WeightNorm.py | """
Implementation of "Weight Normalization: A Simple Reparameterization
to Accelerate Training of Deep Neural Networks"
As a reparameterization method, weight normalization is similar
to BatchNormalization, but it doesn't depend on the minibatch.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch.autograd import Variable
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
# utility for retrieving polyak averaged params
# Update average
v = getattr(namespace, var_name)
v_avg = getattr(namespace, var_name + '_avg')
v_avg -= (1 - polyak_decay) * (v_avg - v.data)
if training:
return v
else:
return Variable(v_avg)
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
# utility for retrieving polyak averaged params
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(
namespace, vn, training, polyak_decay))
return vars
class WeightNormLinear(nn.Linear):
def __init__(self, in_features, out_features,
init_scale=1., polyak_decay=0.9995):
super(WeightNormLinear, self).__init__(
in_features, out_features, bias=True)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_features))
self.b = self.bias
self.register_buffer(
'V_avg', torch.zeros(out_features, in_features))
self.register_buffer('g_avg', torch.zeros(out_features))
self.register_buffer('b_avg', torch.zeros(out_features))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_features * in_features
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
# norm is out_features * 1
V_norm = self.V.data / \
self.V.data.norm(2, 1).expand_as(self.V.data)
# batch_size * out_features
x_init = F.linear(x, Variable(V_norm)).data
# out_features
m_init, v_init = x_init.mean(0).squeeze(
0), x_init.var(0).squeeze(0)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
x_init = scale_init.view(1, -1).expand_as(x_init) \
* (x_init - m_init.view(1, -1).expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
self.training,
polyak_decay=self.polyak_decay)
# batch_size * out_features
x = F.linear(x, V)
scalar = g / torch.norm(V, 2, 1).squeeze(1)
x = scalar.view(1, -1).expand_as(x) * x + \
b.view(1, -1).expand_as(x)
return x
class WeightNormConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConv2d, self).__init__(in_channels, out_channels,
kernel_size, stride, padding,
dilation, groups)
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# out_channels, in_channels // groups, * kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()
).type_as(self.V.data) * 0.05)
V_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
.norm(2, 1).view(self.out_channels, *(
[1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
x_init = F.conv2d(x, Variable(V_norm), None, self.stride,
self.padding, self.dilation, self.groups).data
t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(
x_init) * (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = torch.norm(V.view(self.out_channels, -1), 2, 1)
if len(scalar.size()) == 2:
scalar = g / scalar.squeeze(1)
else:
scalar = g / scalar
W = scalar.view(self.out_channels, *
([1] * (len(V.size()) - 1))).expand_as(V) * V
x = F.conv2d(x, W, b, self.stride,
self.padding, self.dilation, self.groups)
return x
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, output_padding=0, groups=1, init_scale=1.,
polyak_decay=0.9995):
super(WeightNormConvTranspose2d, self).__init__(
in_channels, out_channels,
kernel_size, stride,
padding, output_padding,
groups)
# in_channels, out_channels, *kernel_size
self.V = self.weight
self.g = Parameter(torch.Tensor(out_channels))
self.b = self.bias
self.register_buffer('V_avg', torch.zeros(self.V.size()))
self.register_buffer('g_avg', torch.zeros(out_channels))
self.register_buffer('b_avg', torch.zeros(out_channels))
self.init_scale = init_scale
self.polyak_decay = polyak_decay
self.reset_parameters()
def reset_parameters(self):
return
def forward(self, x, init=False):
if init is True:
# in_channels, out_channels, *kernel_size
self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
self.V.data) * 0.05)
V_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
.view(self.out_channels, -1).norm(2, 1).view(
self.in_channels, self.out_channels,
*([1] * len(self.kernel_size))).expand_as(self.V.data)
x_init = F.conv_transpose2d(
x, Variable(V_norm), None, self.stride,
self.padding, self.output_padding, self.groups).data
# self.out_channels, 1
            t_x_init = x_init.transpose(0, 1).contiguous().view(
self.out_channels, -1)
# out_features
m_init, v_init = t_x_init.mean(1).squeeze(
1), t_x_init.var(1).squeeze(1)
# out_features
scale_init = self.init_scale / \
torch.sqrt(v_init + 1e-10)
self.g.data.copy_(scale_init)
self.b.data.copy_(-m_init * scale_init)
scale_init_shape = scale_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
m_init_shape = m_init.view(
1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
x_init = scale_init_shape.expand_as(x_init)\
* (x_init - m_init_shape.expand_as(x_init))
self.V_avg.copy_(self.V.data)
self.g_avg.copy_(self.g.data)
self.b_avg.copy_(self.b.data)
return Variable(x_init)
else:
V, g, b = get_vars_maybe_avg(
self, ['V', 'g', 'b'], self.training,
polyak_decay=self.polyak_decay)
scalar = g / \
torch.norm(V.transpose(0, 1).contiguous().view(
self.out_channels, -1), 2, 1).squeeze(1)
W = scalar.view(self.in_channels, self.out_channels,
*([1] * (len(V.size()) - 2))).expand_as(V) * V
x = F.conv_transpose2d(x, W, b, self.stride,
self.padding, self.output_padding,
self.groups)
return x
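# Illustrative usage sketch (not in the original file). reset_parameters()
# is a deliberate no-op here: a first call with init=True performs the
# data-dependent initialisation of g and b from batch statistics, e.g.
#
#     conv = WeightNormConv2d(3, 8, kernel_size=3, padding=1)
#     x = Variable(torch.randn(4, 3, 16, 16))
#     y0 = conv(x, init=True)   # initialise g/b from this batch
#     y1 = conv(x)              # later calls use polyak-averaged params
#
# The exact reduction semantics of the init path depend on the legacy
# pytorch version this repo targets (pre-keepdim reduction defaults).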
| 9,574 | 39.231092 | 78 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/__init__.py | from onmt.modules.UtilClass import LayerNorm, Bottle, BottleLinear, \
BottleLayerNorm, BottleSoftmax, Elementwise
from onmt.modules.Gate import ContextGateFactory
from onmt.modules.GlobalAttention import GlobalAttention
from onmt.modules.ConvMultiStepAttention import ConvMultiStepAttention
from onmt.modules.ImageEncoder import ImageEncoder
from onmt.modules.CopyGenerator import CopyGenerator, CopyGeneratorLossCompute
from onmt.modules.StructuredAttention import MatrixTree
from onmt.modules.Transformer import TransformerEncoder, TransformerDecoder
from onmt.modules.Conv2Conv import CNNEncoder, CNNDecoder
from onmt.modules.MultiHeadedAttn import MultiHeadedAttention
from onmt.modules.StackedRNN import StackedLSTM, StackedGRU
from onmt.modules.Embeddings import Embeddings
from onmt.modules.WeightNorm import WeightNormConv2d
from onmt.modules.SRU import check_sru_requirement
can_use_sru = check_sru_requirement()
if can_use_sru:
from onmt.modules.SRU import SRU
# For flake8 compatibility.
__all__ = [GlobalAttention, ImageEncoder, CopyGenerator, MultiHeadedAttention,
LayerNorm, Bottle, BottleLinear, BottleLayerNorm, BottleSoftmax,
TransformerEncoder, TransformerDecoder, Embeddings, Elementwise,
MatrixTree, WeightNormConv2d, ConvMultiStepAttention,
CNNEncoder, CNNDecoder, StackedLSTM, StackedGRU, ContextGateFactory,
CopyGeneratorLossCompute]
if can_use_sru:
__all__.extend([SRU, check_sru_requirement])
| 1,489 | 45.5625 | 79 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/OpenNMT/onmt/modules/ImageEncoder.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import torch.cuda
from torch.autograd import Variable
class ImageEncoder(nn.Module):
"""
Encoder recurrent neural network for Images.
"""
def __init__(self, num_layers, bidirectional, rnn_size, dropout):
"""
Args:
num_layers (int): number of encoder layers.
bidirectional (bool): bidirectional encoder.
rnn_size (int): size of hidden states of the rnn.
            dropout (float): dropout probability.
"""
super(ImageEncoder, self).__init__()
self.num_layers = num_layers
self.num_directions = 2 if bidirectional else 1
self.hidden_size = rnn_size
self.layer1 = nn.Conv2d(3, 64, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer2 = nn.Conv2d(64, 128, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer3 = nn.Conv2d(128, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer4 = nn.Conv2d(256, 256, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer5 = nn.Conv2d(256, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.layer6 = nn.Conv2d(512, 512, kernel_size=(3, 3),
padding=(1, 1), stride=(1, 1))
self.batch_norm1 = nn.BatchNorm2d(256)
self.batch_norm2 = nn.BatchNorm2d(512)
self.batch_norm3 = nn.BatchNorm2d(512)
input_size = 512
self.rnn = nn.LSTM(input_size, rnn_size,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional)
self.pos_lut = nn.Embedding(1000, input_size)
def load_pretrained_vectors(self, opt):
        # Pass in needed options only when modifying the function definition.
pass
def forward(self, input, lengths=None):
batchSize = input.size(0)
# (batch_size, 64, imgH, imgW)
# layer 1
input = F.relu(self.layer1(input[:, :, :, :]-0.5), True)
# (batch_size, 64, imgH/2, imgW/2)
input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 128, imgH/2, imgW/2)
# layer 2
input = F.relu(self.layer2(input), True)
# (batch_size, 128, imgH/2/2, imgW/2/2)
input = F.max_pool2d(input, kernel_size=(2, 2), stride=(2, 2))
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer 3
# batch norm 1
input = F.relu(self.batch_norm1(self.layer3(input)), True)
# (batch_size, 256, imgH/2/2, imgW/2/2)
# layer4
input = F.relu(self.layer4(input), True)
# (batch_size, 256, imgH/2/2/2, imgW/2/2)
input = F.max_pool2d(input, kernel_size=(1, 2), stride=(1, 2))
# (batch_size, 512, imgH/2/2/2, imgW/2/2)
# layer 5
# batch norm 2
input = F.relu(self.batch_norm2(self.layer5(input)), True)
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
input = F.max_pool2d(input, kernel_size=(2, 1), stride=(2, 1))
# (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
input = F.relu(self.batch_norm3(self.layer6(input)), True)
# # (batch_size, 512, H, W)
# # (batch_size, H, W, 512)
all_outputs = []
for row in range(input.size(2)):
inp = input[:, :, row, :].transpose(0, 2)\
.transpose(1, 2)
pos_emb = self.pos_lut(
Variable(torch.cuda.LongTensor(batchSize).fill_(row)))
with_pos = torch.cat(
(pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
outputs, hidden_t = self.rnn(with_pos)
all_outputs.append(outputs)
out = torch.cat(all_outputs, 0)
return hidden_t, out
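# Illustrative usage sketch (not in the original file). The positional
# lookup builds torch.cuda.LongTensor indices, so a CUDA device is assumed:
#
#     enc = ImageEncoder(num_layers=2, bidirectional=True,
#                        rnn_size=256, dropout=0.3).cuda()
#     img = Variable(torch.randn(8, 3, 32, 128).cuda())  # batch x 3 x H x W
#     hidden_t, out = enc(img)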
| 3,998 | 36.373832 | 76 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Cluster/clustering.py | # coding:utf-8
"""
main code for clustering
defines the clustering class;
run this code to cluster the stanford data
"""
import json
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer
all_file = ['dev.json', 'test.json', 'train.json']
all_task = ["navigate", "schedule", "weather"]
class Cluster:
def __init__(self, input_dir, result_dir):
        print('This version of cluster is made for pseudo-labeled slot/intent data')
self.input_dir = input_dir
self.result_dir = result_dir
        # Tip:
        # Don't pass {} as the default value to dict.fromkeys(): the single
        # {} literal is shared, so every key would point to the same dict
        # object in memory. Fill the values per key in a loop instead, or
        # build one fresh dict per key with map:
        # all_data = dict(zip(all_task, map(lambda x: {}, [None] * len(all_task))))
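        # For example (illustrative):
        #     d = dict.fromkeys(['a', 'b'], {})
        #     d['a']['x'] = 1                   # d['b'] now also has 'x'
        #     d = {k: {} for k in ['a', 'b']}   # safe: one dict per key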
self.all_data_item = dict.fromkeys(all_task) # store all data item
        # Two dicts for template refilling
        # 1. Template query dictionary
        # In the cluster directory; the key is the template text and each value is a sub-dictionary
        # whose keys are the slot names in the template and whose values are lists of observed slot values.
        # 2. Full dictionary
        # In the cluster directory; the key is a slot name and the value is the list of all possible slot values.
self.all_temp_dict = dict.fromkeys(all_task)
self.all_full_dict = dict.fromkeys(all_task)
for key in all_task:
# init store all data item
self.all_data_item[key] = []
# init template refill dict
self.all_temp_dict[key] = {}
# init full query dict
self.all_full_dict[key] = {}
def unpack_and_cook_raw_data(self, raw_data):
# empty the all_data_item pool for current data
for task in all_task:
self.all_data_item[task] = []
for dialogue in raw_data:
task = dialogue['scenario']['task']['intent']
uuid = dialogue['scenario']['uuid']
turn_lst = dialogue['dialogue']
# data extraction
for ind, turn in enumerate(turn_lst):
if turn['turn'] == "driver":
agent_reply = None
for remained_turn in turn_lst[ind:]: # to get next agent reply
if remained_turn['turn'] == "assistant":
agent_reply = remained_turn
break
if agent_reply and agent_reply['data']['slots']:
common_slot = '_'.join(sorted(agent_reply['data']['slots'].keys()))
agent_say = agent_reply['data']['utterance']
else: # cast to blank category
agent_say = None
common_slot = 'no_slot'
ori_pair = [turn, agent_reply]
user_say = turn['data']['utterance']
data_item = {
'ori_pair': ori_pair,
'user_say': user_say,
'user_temp': '',
'agent_say': agent_say,
'agent_temp': '',
'uuid': uuid
}
data_item = self.entity_replace(data_item)
self.all_data_item[task].append([common_slot, data_item])
# fill the dict for temp refilling
self.update_dict(task, data_item)
def cluster_by_slot(self, target_file, split_rate_lst):
print('Start %s clustering by slot' % target_file)
data_label = target_file.replace('.json', '')
raw_data = self.load_data(self.input_dir + target_file)
self.unpack_and_cook_raw_data(raw_data)
# cluster and output results
for task in all_task:
data_item_set_lst = [] # store different size of data item set
if split_rate_lst and 'train' in data_label:
for split_rate in split_rate_lst:
end_index = int(len(self.all_data_item[task]) * split_rate)
data_item_set_lst.append(self.all_data_item[task][:end_index])
else:
data_item_set_lst = [self.all_data_item[task]]
split_rate_lst = [1.0]
for ind, data_item_set in enumerate(data_item_set_lst):
# clustering data here
clustered_data = {}
for common_slot, data_item in data_item_set:
if common_slot in clustered_data:
clustered_data[common_slot].append(data_item)
else:
clustered_data[common_slot] = [data_item]
with open(self.result_dir + data_label + '_' + task + str(split_rate_lst[ind]) + '.json', 'w') as writer:
json.dump(clustered_data, writer, indent=2)
def update_dict(self, task, data_item):
agent_reply = data_item['ori_pair'][1]
if agent_reply:
user_temp = ' '.join(low_case_tokenizer(data_item['user_temp']))
if user_temp not in self.all_temp_dict[task]:
self.all_temp_dict[task][user_temp] = {}
for (slot_name, slot_value) in agent_reply['data']['slots'].items():
slot_value = slot_value.lower().strip()
slot_name = slot_name.lower().strip()
# update temp query dict
if slot_name in self.all_temp_dict[task][user_temp]:
self.all_temp_dict[task][user_temp][slot_name].append(slot_value)
else:
self.all_temp_dict[task][user_temp][slot_name] = [slot_value]
# update full dict
if slot_name in self.all_full_dict[task]:
self.all_full_dict[task][slot_name].append(slot_value)
else:
self.all_full_dict[task][slot_name] = [slot_value]
else:
# print('no reply')
pass
return data_item
def dump_dict(self):
for task in all_task:
with open(self.result_dir + task + '_temp-query.dict', 'w') as writer:
json.dump(self.all_temp_dict[task], writer, indent=2)
with open(self.result_dir + task + '_full-query.dict', 'w') as writer:
json.dump(self.all_full_dict[task], writer, indent=2)
@staticmethod
def load_data(target_path):
with open(target_path, 'r') as reader:
json_data = json.load(reader)
return json_data
@staticmethod
def entity_replace(data_item):
agent_reply = data_item['ori_pair'][1]
if agent_reply:
data_item['user_temp'] = data_item['user_say'].lower()
data_item['agent_temp'] = data_item['agent_say'].lower() if data_item['agent_say'] else ''
for (slot_name, slot_value) in agent_reply['data']['slots'].items():
slot_value = slot_value.lower().strip()
slot_name = slot_name.lower().strip()
data_item['user_temp'] = data_item['user_temp'].replace(slot_value, '<' + slot_name + '>')
data_item['agent_temp'] = data_item['agent_temp'].replace(slot_value, slot_name)
else:
# print('no reply')
pass
return data_item
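# Illustrative example of entity_replace() (assumed values, not taken from
# the data): a user utterance "set a reminder for dinner at 7pm" with agent
# slots {"event": "dinner", "time": "7pm"} yields the user template
# "set a reminder for <event> at <time>", and the agent template has the
# bare slot names substituted in place of their values.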
def slot_clustering_and_dump_dict(config=None, train_set_split_rate_lst=None):
print('user utterance clustering')
if not config:
with open('../../config.json', 'r') as con_f:
config = json.load(con_f)
tmp_cluster = Cluster(
input_dir=config['path']['RawData']['stanford'],
result_dir=config['path']['ClusteringResult']
)
for f in all_file:
tmp_cluster.cluster_by_slot(f, train_set_split_rate_lst)
debug_n = 0
for v in tmp_cluster.all_data_item.values():
debug_n += len(v)
print("debug!:", debug_n)
tmp_cluster.dump_dict()
| 8,064 | 43.558011 | 121 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Cluster/conll_format_clustering.py | # coding:utf-8
"""
main code for clustering
defines the clustering class;
run this code to cluster the stanford data
"""
import json
import os
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer
CONTEXT_WINDOW_SIZE = 2  # empirical context-window size commonly used as a feature in slot-filling tasks
SENT_COUNT_SPLIT = False
def debug_all_data_print(d):
for dm in d:
print('========== domain: %s ===========' % dm)
print(d[dm][:10])
class Cluster:
def __init__(self, input_dir, result_dir, all_domain, cluster_mode='all', special_mark=''):
self.input_dir = input_dir
self.result_dir = result_dir
self.all_domain = all_domain
self.cluster_mode = cluster_mode
self.special_mark = special_mark
        # Tip:
        # Don't pass {} as the default value to dict.fromkeys(): the single
        # {} literal is shared, so every key would point to the same dict
        # object in memory. Fill the values per key in a loop instead, or
        # build one fresh dict per key with map:
        # all_data = dict(zip(all_task, map(lambda x: {}, [None] * len(all_task))))
self.all_data_item = dict.fromkeys(all_domain) # store all data item
        # ========= Prepare dicts for template refilling ===============
        # 1. Template query dictionary
        # Saved in the cluster dir; keys are template texts and values are sub-dictionaries
        # whose keys are the slot names appearing in the template and whose values are lists of observed slot values.
        # 2. Full query dictionary
        # Saved in the cluster dir; the key is a slot name and the value is the list
        # of all possible slot values.
        # 3. Train full query dictionary
        # Kept per split rate; same as the full query dictionary, but it only counts train-set appearances.
        # 4. All context dictionary
        # An evolution of the template query dict; records every slot's context in the format below:
# context_dict = {
# slot-name1: {
# 3_word_context_text1 :set(value1, value2, value3)
# }
# }
# implemented below:
self.all_temp_dict = dict.fromkeys(all_domain)
self.all_full_dict = dict.fromkeys(all_domain)
self.train_full_dict = dict.fromkeys(all_domain)
self.all_context_dict = dict.fromkeys(all_domain)
for key in all_domain:
# init store all data item
self.all_data_item[key] = []
# init template refill dict
self.all_temp_dict[key] = {}
# init full query dict
self.all_full_dict[key] = {}
# init full query dict
self.train_full_dict[key] = {}
# init full query dict
self.all_context_dict[key] = {}
def update_dict(self, domain_name, data_item, data_label, split_rate):
user_temp = data_item['user_temp']
slot_name_lst = data_item['slot_name_lst']
slot_value_lst = data_item['slot_value_lst']
context_lst = data_item['context_lst']
# ===== init the all_context_dict for current split_rate =====
if split_rate not in self.all_context_dict[domain_name]:
self.all_context_dict[domain_name][split_rate] = {}
# ===== init the train_full_dict for current split_rate =====
if split_rate not in self.train_full_dict[domain_name]:
self.train_full_dict[domain_name][split_rate] = {}
# ==== start to update =======
for slot_name, slot_value, context in zip(slot_name_lst, slot_value_lst, context_lst):
slot_value_str = ' '.join(slot_value)
context_str = ' '.join(context)
if slot_name:
if data_label == 'train':
# ========== update context_dict =========
if slot_name not in self.all_context_dict[domain_name][split_rate]:
self.all_context_dict[domain_name][split_rate][slot_name] = {}
if context_str in self.all_context_dict[domain_name][split_rate][slot_name]:
self.all_context_dict[domain_name][split_rate][slot_name][context_str].add(slot_value_str)
else:
self.all_context_dict[domain_name][split_rate][slot_name][context_str] = set()
self.all_context_dict[domain_name][split_rate][slot_name][context_str].add(slot_value_str)
# ========== update all_temp_dict =========
if user_temp not in self.all_temp_dict[domain_name]:
self.all_temp_dict[domain_name][user_temp] = {}
if slot_name in self.all_temp_dict[domain_name][user_temp]:
self.all_temp_dict[domain_name][user_temp][slot_name].add(slot_value_str)
else:
self.all_temp_dict[domain_name][user_temp][slot_name] = set()
self.all_temp_dict[domain_name][user_temp][slot_name].add(slot_value_str)
                    # ========== update train_full_dict ==========
                    if slot_name not in self.train_full_dict[domain_name][split_rate]:
                        self.train_full_dict[domain_name][split_rate][slot_name] = set()
                    self.train_full_dict[domain_name][split_rate][slot_name].add(slot_value_str)
# ========= update all_full_dict ==========
if slot_name in self.all_full_dict[domain_name]:
self.all_full_dict[domain_name][slot_name].add(slot_value_str)
else:
self.all_full_dict[domain_name][slot_name] = set()
self.all_full_dict[domain_name][slot_name].add(slot_value_str)
elif data_label in ['dev', 'test']: # record slot name-value case in dev and test set
# ========= update all_full_dict ==========
if slot_name in self.all_full_dict[domain_name]:
self.all_full_dict[domain_name][slot_name].add(slot_value_str)
else:
self.all_full_dict[domain_name][slot_name] = set()
self.all_full_dict[domain_name][slot_name].add(slot_value_str)
else:
print('Error: wrong data label', data_label)
raise RuntimeError
def unpack_and_cook_raw_data(self, raw_data, domain_name):
# empty the all_data_item pool for current data
for dm in self.all_domain:
self.all_data_item[dm] = []
for dialogue in raw_data:
word_label_pair_lst = dialogue.split('\n')
# Store info for a dialogue
all_user_word_lst = []
all_slot_label_lst = []
all_intent_lst = []
# Store info for one sentence of a dialogue
user_word_lst = []
slot_label_lst = []
intent_lst = []
for pair in word_label_pair_lst:
word, label = pair.split()
word = word.lower()
if word in ['intent1', 'intent2', 'intent3', ]:
if label != 'O':
intent_lst.append(label)
elif word in ['intent4', 'intent5', 'intent6']: # deal with special case
all_intent_lst[-1].append(label)
else:
user_word_lst.append(word)
slot_label_lst.append(label)
# check sentence end
if word in ['intent3', ]:
all_user_word_lst.append(user_word_lst)
all_slot_label_lst.append(slot_label_lst)
all_intent_lst.append(intent_lst)
user_word_lst = []
slot_label_lst = []
intent_lst = []
            # ======= collect the remaining sentence and adapt to the ATIS data format =======
all_user_word_lst.append(user_word_lst)
all_slot_label_lst.append(slot_label_lst)
all_intent_lst.append(intent_lst)
for user_word_lst, slot_label_lst, intent_lst in zip(all_user_word_lst, all_slot_label_lst, all_intent_lst):
data_item = {
'user_say': ' '.join(user_word_lst),
'user_word_lst': user_word_lst,
'user_temp': '',
'user_temp_word_lst': [],
                    'label_lst': slot_label_lst,  # A list of slot labels; '' represents an empty element
                    'intent_lst': intent_lst,  # A list of intent labels; '' represents an empty element
                    'slot_name_lst': [],  # A list of slot names; '' represents an empty element
                    'slot_value_lst': [],  # A list of slot-value word lists; [''] represents an empty element
                    'context_lst': []  # A list of context-word lists; [''] represents an empty element
}
data_item = self.entity_replace(data_item)
self.all_data_item[domain_name].append(data_item)
def clustering(self, target_file, split_rate_lst, cluster_mode=None):
cluster_mode = cluster_mode if cluster_mode else self.cluster_mode
# processing data
domain_name = target_file.split('_')[0] + '_' + self.special_mark # eg: weather_labeled
data_label = target_file.split('_')[1] # eg: dev, train, test
raw_data = self.load_data(self.input_dir + target_file)
        self.unpack_and_cook_raw_data(raw_data, domain_name)
        print('Loaded %d data items from %s' % (len(self.all_data_item[domain_name]), target_file))
# ======= split the data to smaller parts ========
data_item_set_lst = [] # store different size of data_item set
if split_rate_lst and 'train' in data_label:
for split_rate in split_rate_lst:
if SENT_COUNT_SPLIT:
end_index = split_rate
else:
end_index = int(len(self.all_data_item[domain_name]) * split_rate)
# print('SR and Sentence Count:', split_rate, end_index, domain_name)
data_item_set_lst.append(self.all_data_item[domain_name][:end_index])
else:
data_item_set_lst = [self.all_data_item[domain_name]]
split_rate_lst = [1]
# ======= fill the dict for temp refilling =========
for ind, data_item_set in enumerate(data_item_set_lst):
for data_item in data_item_set:
self.update_dict(domain_name, data_item, data_label, split_rate_lst[ind])
# ======= start clustering with different algorithm =========
for ind, data_item_set in enumerate(data_item_set_lst):
print('Start %s clustering by %s on split rate of %f' % (target_file, cluster_mode, split_rate_lst[ind]))
if cluster_mode == 'all' or cluster_mode == 'slot':
self.cluster_by_slot(domain_name, data_item_set, data_label, split_rate_lst[ind])
if cluster_mode == 'all' or cluster_mode == 'intent':
self.cluster_by_intent(domain_name, data_item_set, data_label, split_rate_lst[ind])
if cluster_mode == 'all' or cluster_mode == 'slot-intent':
self.cluster_by_intent_and_slot(domain_name, data_item_set, data_label, split_rate_lst[ind])
if cluster_mode == 'no_clustering':
self.no_clustering(domain_name, data_item_set, data_label, split_rate_lst[ind])
def cluster_by_intent(self, domain_name, data_item_set, data_label, split_rate):
# cluster and output results
clustered_data = {} # clustering data here
for data_item in data_item_set:
common_intent = '-'.join(sorted(set(data_item['intent_lst'])))
if common_intent in clustered_data:
clustered_data[common_intent].append(data_item)
else:
clustered_data[common_intent] = [data_item]
with open(self.result_dir + data_label + '_' + domain_name + '_' + 'intent' + str(split_rate) + '.json', 'w') as writer:
json.dump(clustered_data, writer, indent=2)
def cluster_by_slot(self, domain_name, data_item_set, data_label, split_rate):
# cluster and output results
clustered_data = {} # clustering data here
for data_item in data_item_set:
# print('=====================', data_item_set)
common_slot = '-'.join(sorted(set(data_item['slot_name_lst'])))
if common_slot in clustered_data:
clustered_data[common_slot].append(data_item)
else:
clustered_data[common_slot] = [data_item]
with open(self.result_dir + data_label + '_' + domain_name + '_' + 'slot' + str(split_rate) + '.json', 'w') as writer:
json.dump(clustered_data, writer, indent=2)
def cluster_by_intent_and_slot(self, domain_name, data_item_set, data_label, split_rate):
# cluster and output results
clustered_data = {} # clustering data here
for data_item in data_item_set:
common_intent_and_slot = '-'.join(sorted(set(data_item['slot_name_lst'] + data_item['intent_lst'])))
if common_intent_and_slot in clustered_data:
clustered_data[common_intent_and_slot].append(data_item)
else:
clustered_data[common_intent_and_slot] = [data_item]
with open(self.result_dir + data_label + '_' + domain_name + '_' + 'intent-slot' + str(split_rate) + '.json', 'w') as writer:
json.dump(clustered_data, writer, indent=2)
def no_clustering(self, domain_name, data_item_set, data_label, split_rate):
# don't cluster and output results
clustered_data = {} # clustering data here
for data_item in data_item_set:
common_value = ''
if common_value in clustered_data:
clustered_data[common_value].append(data_item)
else:
clustered_data[common_value] = [data_item]
with open(self.result_dir + data_label + '_' + domain_name + '_' + 'nc' + str(split_rate) + '.json',
'w') as writer:
json.dump(clustered_data, writer, indent=2)
def dump_dict(self, split_rate_lst):
self.deep_change_set_to_list_dict(self.all_context_dict)
self.deep_change_set_to_list_dict(self.all_temp_dict)
self.deep_change_set_to_list_dict(self.all_full_dict)
self.deep_change_set_to_list_dict(self.train_full_dict)
for domain_name in self.all_domain:
for split_rate in split_rate_lst:
with open(self.result_dir + domain_name + str(split_rate) + '_context-query.dict', 'w') as writer:
json.dump(self.all_context_dict[domain_name][split_rate], writer, indent=2)
with open(self.result_dir + domain_name + str(split_rate) + '_train_full-query.dict', 'w') as writer:
json.dump(self.train_full_dict[domain_name][split_rate], writer, indent=2)
with open(self.result_dir + domain_name + '_temp-query.dict', 'w') as writer:
json.dump(self.all_temp_dict[domain_name], writer, indent=2)
with open(self.result_dir + domain_name + '_full-query.dict', 'w') as writer:
json.dump(self.all_full_dict[domain_name], writer, indent=2)
def deep_change_set_to_list_dict(self, d):
for key in d:
if type(d[key]) == dict:
self.deep_change_set_to_list_dict(d[key])
elif type(d[key]) == set:
d[key] = list(d[key])
@staticmethod
def load_data(target_path):
with open(target_path, 'r') as reader:
return reader.read().strip().split('\n\n')
@staticmethod
def entity_replace(data_item):
"""
Replace slot value with slot name in the template.
Notice:
            Element lists, including slot_name_lst, slot_value_lst and context_lst, are aligned with
            the template word list, and '' is used as the padding element.
        :param data_item: Dict type, storing the different properties of one utterance
:return:
"""
word_lst = data_item['user_word_lst']
label_lst = data_item['label_lst']
temp_word_lst = []
slot_name_lst = []
slot_value_lst = []
context_lst = []
for w, l in zip(word_lst, label_lst):
if l == 'O':
temp_word_lst.append(w)
slot_name_lst.append('')
slot_value_lst.append([''])
elif 'B-' in l:
slot_name = l.replace('B-', '')
temp_word_lst.append('<%s>' % slot_name)
slot_name_lst.append(slot_name)
slot_value_lst.append([w])
elif 'I-' in l:
slot_value_lst[-1].append(w)
for ind, pair in enumerate(zip(temp_word_lst, slot_name_lst, slot_value_lst)):
temp_w, slot_name, slot_value = pair
if slot_name:
context_text = temp_word_lst[max(ind - CONTEXT_WINDOW_SIZE, 0): ind + CONTEXT_WINDOW_SIZE + 1]
else:
context_text = ['']
context_lst.append(context_text)
data_item['user_temp_word_lst'] = temp_word_lst
data_item['user_temp'] = ' '.join(temp_word_lst)
data_item['slot_name_lst'] = slot_name_lst
data_item['slot_value_lst'] = slot_value_lst
data_item['context_lst'] = context_lst
return data_item
def clustering_and_dump_dict(data_dir, config=None, cluster_mode='all', train_set_split_rate_lst=None, special_mark='labeled'):
print('user utterance clustering')
if not config:
with open('../../config.json', 'r') as con_f:
config = json.load(con_f)
all_file = os.listdir(data_dir)
# =========== collect domain name ==========
all_domain = set()
    print('Found data files:', all_file)
for file_name in all_file:
all_domain.add(str(file_name.split('_')[0]) + '_' + special_mark)
tmp_cluster = Cluster(
input_dir=data_dir,
result_dir=config['path']['ClusteringResult'],
all_domain=all_domain,
cluster_mode=cluster_mode,
special_mark=special_mark,
)
for f in all_file:
tmp_cluster.clustering(f, train_set_split_rate_lst)
tmp_cluster.dump_dict(train_set_split_rate_lst)
| 19,236 | 48.836788 | 133 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Cluster/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Cluster/atis_clustering.py | # coding:utf-8
from source.Cluster.clustering import Cluster
# TODO: this ATIS-specific subclass is not implemented yet
class AtisCluster(Cluster):
def __init__(self, input_dir, output_dir):
Cluster.__init__(self, input_dir, output_dir)
def unpack_and_cook_raw_data(self, raw_data):
pass
if __name__ == "__main__":
print("Hi, there!!")
| 330 | 19.6875 | 53 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/seq2seq_translation_tutorial.py | # -*- coding: utf-8 -*-
"""
Translation with a Sequence to Sequence Network and Attention
*************************************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
In this project we will be teaching a neural network to translate from
French to English.
::
[KEY: > input, = target, < output]
> il est en train de peindre un tableau .
= he is painting a picture .
< he is painting a picture .
> pourquoi ne pas essayer ce vin delicieux ?
= why not try that delicious wine ?
< why not try that delicious wine ?
> elle n est pas poete mais romanciere .
= she is not a poet but a novelist .
< she not not a poet but a novelist .
> vous etes trop maigre .
= you re too skinny .
< you re all alone .
... to varying degrees of success.
This is made possible by the simple but powerful idea of the `sequence
to sequence network <http://arxiv.org/abs/1409.3215>`__, in which two
recurrent neural networks work together to transform one sequence to
another. An encoder network condenses an input sequence into a vector,
and a decoder network unfolds that vector into a new sequence.
.. figure:: /_static/img/seq-seq-images/seq2seq.png
:alt:
To improve upon this model we'll use an `attention
mechanism <https://arxiv.org/abs/1409.0473>`__, which lets the decoder
learn to focus over a specific range of the input sequence.
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- http://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about Sequence to Sequence networks and
how they work:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
You will also find the previous tutorials on
:doc:`/intermediate/char_rnn_classification_tutorial`
and :doc:`/intermediate/char_rnn_generation_tutorial`
helpful as those concepts are very similar to the Encoder and Decoder
models, respectively.
And for more, read the papers that introduced these topics:
- `Learning Phrase Representations using RNN Encoder-Decoder for
Statistical Machine Translation <http://arxiv.org/abs/1406.1078>`__
- `Sequence to Sequence Learning with Neural
Networks <http://arxiv.org/abs/1409.3215>`__
- `Neural Machine Translation by Jointly Learning to Align and
Translate <https://arxiv.org/abs/1409.0473>`__
- `A Neural Conversational Model <http://arxiv.org/abs/1506.05869>`__
**Requirements**
"""
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
use_cuda = torch.cuda.is_available()
######################################################################
# Loading data files
# ==================
#
# The data for this project is a set of many thousands of English to
# French translation pairs.
#
# `This question on Open Data Stack
# Exchange <http://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages>`__
# pointed me to the open translation site http://tatoeba.org/ which has
# downloads available at http://tatoeba.org/eng/downloads - and better
# yet, someone did the extra work of splitting language pairs into
# individual text files here: http://www.manythings.org/anki/
#
# The English to French pairs are too big to include in the repo, so
# download to ``data/eng-fra.txt`` before continuing. The file is a tab
# separated list of translation pairs:
#
# ::
#
# I am cold. Je suis froid.
#
# .. Note::
# Download the data from
# `here <https://download.pytorch.org/tutorial/data.zip>`_
# and extract it to the current directory.
######################################################################
# Similar to the character encoding used in the character-level RNN
# tutorials, we will be representing each word in a language as a one-hot
# vector, or giant vector of zeros except for a single one (at the index
# of the word). Compared to the dozens of characters that might exist in a
# language, there are many many more words, so the encoding vector is much
# larger. We will however cheat a bit and trim the data to only use a few
# thousand words per language.
#
# .. figure:: /_static/img/seq-seq-images/word-encoding.png
# :alt:
#
#
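# A minimal sketch of one-hot encoding (illustrative only; the models below consume
# word indexes directly and let nn.Embedding do the lookup):
vocab_size, word_index = 5, 2
one_hot = torch.zeros(vocab_size)
one_hot[word_index] = 1  # -> tensor([0., 0., 1., 0., 0.])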
######################################################################
# We'll need a unique index per word to use as the inputs and targets of
# the networks later. To keep track of all this we will use a helper class
# called ``Lang`` which has word → index (``word2index``) and index → word
# (``index2word``) dictionaries, as well as a count of each word
# ``word2count`` to use to later replace rare words.
#
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
######################################################################
# The files are all in Unicode, to simplify we will turn Unicode
# characters to ASCII, make everything lowercase, and trim most
# punctuation.
#
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
######################################################################
# The Seq2Seq Model
# =================
#
# A Recurrent Neural Network, or RNN, is a network that operates on a
# sequence and uses its own output as input for subsequent steps.
#
# A `Sequence to Sequence network <http://arxiv.org/abs/1409.3215>`__, or
# seq2seq network, or `Encoder Decoder
# network <https://arxiv.org/pdf/1406.1078v3.pdf>`__, is a model
# consisting of two RNNs called the encoder and decoder. The encoder reads
# an input sequence and outputs a single vector, and the decoder reads
# that vector to produce an output sequence.
#
# .. figure:: /_static/img/seq-seq-images/seq2seq.png
# :alt:
#
# Unlike sequence prediction with a single RNN, where every input
# corresponds to an output, the seq2seq model frees us from sequence
# length and order, which makes it ideal for translation between two
# languages.
#
# Consider the sentence "Je ne suis pas le chat noir" → "I am not the
# black cat". Most of the words in the input sentence have a direct
# translation in the output sentence, but are in slightly different
# orders, e.g. "chat noir" and "black cat". Because of the "ne/pas"
# construction there is also one more word in the input sentence. It would
# be difficult to produce a correct translation directly from the sequence
# of input words.
#
# With a seq2seq model the encoder creates a single vector which, in the
# ideal case, encodes the "meaning" of the input sequence into a single
# vector — a single point in some N dimensional space of sentences.
#
######################################################################
# The Encoder
# -----------
#
# The encoder of a seq2seq network is a RNN that outputs some value for
# every word from the input sentence. For every input word the encoder
# outputs a vector and a hidden state, and uses the hidden state for the
# next input word.
#
# .. figure:: /_static/img/seq-seq-images/encoder-network.png
# :alt:
#
#
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, n_layers=1):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
for i in range(self.n_layers):
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# The Decoder
# -----------
#
# The decoder is another RNN that takes the encoder output vector(s) and
# outputs a sequence of words to create the translation.
#
######################################################################
# Simple Decoder
# ^^^^^^^^^^^^^^
#
# In the simplest seq2seq decoder we use only last output of the encoder.
# This last output is sometimes called the *context vector* as it encodes
# context from the entire sequence. This context vector is used as the
# initial hidden state of the decoder.
#
# At every step of decoding, the decoder is given an input token and
# hidden state. The initial input token is the start-of-string ``<SOS>``
# token, and the first hidden state is the context vector (the encoder's
# last hidden state).
#
# .. figure:: /_static/img/seq-seq-images/decoder-network.png
# :alt:
#
#
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1):
super(DecoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax()
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# I encourage you to train and observe the results of this model, but to
# save space we'll be going straight for the gold and introducing the
# Attention Mechanism.
#
######################################################################
# Attention Decoder
# ^^^^^^^^^^^^^^^^^
#
# If only the context vector is passed between the encoder and decoder,
# that single vector carries the burden of encoding the entire sentence.
#
# Attention allows the decoder network to "focus" on a different part of
# the encoder's outputs for every step of the decoder's own outputs. First
# we calculate a set of *attention weights*. These will be multiplied by
# the encoder output vectors to create a weighted combination. The result
# (called ``attn_applied`` in the code) should contain information about
# that specific part of the input sequence, and thus help the decoder
# choose the right output words.
#
# .. figure:: https://i.imgur.com/1152PYf.png
# :alt:
#
# Calculating the attention weights is done with another feed-forward
# layer ``attn``, using the decoder's input and hidden state as inputs.
# Because there are sentences of all sizes in the training data, to
# actually create and train this layer we have to choose a maximum
# sentence length (input length, for encoder outputs) that it can apply
# to. Sentences of the maximum length will use all the attention weights,
# while shorter sentences will only use the first few.
#
# .. figure:: /_static/img/seq-seq-images/attention-decoder-network.png
# :alt:
#
#
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_output, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)))
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]))
return output, hidden, attn_weights
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if use_cuda:
return result.cuda()
else:
return result
######################################################################
# .. note:: There are other forms of attention that work around the length
# limitation by using a relative position approach. Read about "local
# attention" in `Effective Approaches to Attention-based Neural Machine
# Translation <https://arxiv.org/abs/1508.04025>`__.
#
# Training
# ========
#
# Preparing Training Data
# -----------------------
#
# To train, for each pair we will need an input tensor (indexes of the
# words in the input sentence) and target tensor (indexes of the words in
# the target sentence). While creating these vectors we will append the
# EOS token to both sequences.
#
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def variableFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
result = Variable(torch.LongTensor(indexes).view(-1, 1))
if use_cuda:
return result.cuda()
else:
return result
def variablesFromPair(pair):
input_variable = variableFromSentence(input_lang, pair[0])
target_variable = variableFromSentence(output_lang, pair[1])
return (input_variable, target_variable)
######################################################################
# Training the Model
# ------------------
#
# To train we run the input sentence through the encoder, and keep track
# of every output and the latest hidden state. Then the decoder is given
# the ``<SOS>`` token as its first input, and the last hidden state of the
# encoder as its first hidden state.
#
# "Teacher forcing" is the concept of using the real target outputs as
# each next input, instead of using the decoder's guess as the next input.
# Using teacher forcing causes it to converge faster but `when the trained
# network is exploited, it may exhibit
# instability <http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf>`__.
#
# You can observe outputs of teacher-forced networks that read with
# coherent grammar but wander far from the correct translation -
# intuitively it has learned to represent the output grammar and can "pick
# up" the meaning once the teacher tells it the first few words, but it
# has not properly learned how to create the sentence from the translation
# in the first place.
#
# Because of the freedom PyTorch's autograd gives us, we can randomly
# choose to use teacher forcing or not with a simple if statement. Turn
# ``teacher_forcing_ratio`` up to use more of it.
#
teacher_forcing_ratio = 0.5
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output, target_variable[di])
if ni == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
######################################################################
# This is a helper function to print time elapsed and estimated time
# remaining given the current time and progress %.
#
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
######################################################################
# The whole training process looks like this:
#
# - Start a timer
# - Initialize optimizers and criterion
# - Create set of training pairs
# - Start empty losses array for plotting
#
# Then we call ``train`` many times and occasionally print the progress (%
# of examples, time so far, estimated time) and average loss.
#
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [variablesFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = train(input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
######################################################################
# Plotting results
# ----------------
#
# Plotting is done with matplotlib, using the array of loss values
# ``plot_losses`` saved while training.
#
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
input_variable = variableFromSentence(input_lang, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]])) # SOS
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_lang.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluateRandomly(encoder, decoder, n=10):
for i in range(n):
pair = random.choice(pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0])
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# Training and Evaluating
# =======================
#
# With all these helper functions in place (it looks like extra work, but
# it makes running multiple experiments easier) we can actually
# initialize a network and start training.
#
# Remember that the input sentences were heavily filtered. For this small
# dataset we can use relatively small networks of 256 hidden nodes and a
# single GRU layer. After about 40 minutes on a MacBook CPU we'll get some
# reasonable results.
#
# .. Note::
# If you run this notebook you can train, interrupt the kernel,
# evaluate, and continue training later. Comment out the lines where the
# encoder and decoder are initialized and run ``trainIters`` again.
#
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size)
attn_decoder1 = AttnDecoderRNN(hidden_size, output_lang.n_words,
1, dropout_p=0.1)
if use_cuda:
encoder1 = encoder1.cuda()
attn_decoder1 = attn_decoder1.cuda()
trainIters(encoder1, attn_decoder1, 75000, print_every=5000)
######################################################################
#
evaluateRandomly(encoder1, attn_decoder1)
######################################################################
# Visualizing Attention
# ---------------------
#
# A useful property of the attention mechanism is its highly interpretable
# outputs. Because it is used to weight specific encoder outputs of the
# input sequence, we can imagine looking where the network is focused most
# at each time step.
#
# You could simply run ``plt.matshow(attentions)`` to see attention output
# displayed as a matrix, with the columns being input steps and rows being
# output steps:
#
output_words, attentions = evaluate(
encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def showAttention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluateAndShowAttention(input_sentence):
output_words, attentions = evaluate(
encoder1, attn_decoder1, input_sentence)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
showAttention(input_sentence, output_words, attentions)
evaluateAndShowAttention("elle a cinq ans de moins que moi .")
evaluateAndShowAttention("elle est trop petit .")
evaluateAndShowAttention("je ne crains pas de mourir .")
evaluateAndShowAttention("c est un jeune directeur plein de talent .")
######################################################################
# Exercises
# =========
#
# - Try with a different dataset
#
# - Another language pair
# - Human → Machine (e.g. IOT commands)
# - Chat → Response
# - Question → Answer
#
# - Replace the embeddings with pre-trained word embeddings such as word2vec or
# GloVe
# - Try with more layers, more hidden units, and more sentences. Compare
# the training time and results.
# - If you use a translation file where pairs have two of the same phrase
# (``I am test \t I am test``), you can use this as an autoencoder. Try
# this:
#
# - Train as an autoencoder
# - Save only the Encoder network
# - Train a new Decoder for translation from there
#
| 31,375 | 33.939866 | 133 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/PrepareData.py | # coding: utf-8
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer
from itertools import combinations
import pickle
import json
import os
SOS_token = 0
EOS_token = 1
class WordTable:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def add_sentence(self, sentence):
for word in sentence:
self.add_word(word)
def add_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def utterance_tokenize(clustered_items):
"""
tokenize the utterance with nlp_tool.py
:param clustered_items: List, dialogue pairs with same user intents
:return:
"""
for ind, item in enumerate(clustered_items):
clustered_items[ind]['user_temp'] = low_case_tokenizer(item['user_temp'])
clustered_items[ind]['agent_temp'] = low_case_tokenizer(item['agent_temp'])
return clustered_items
def utterance_filter(clustered_items):
"""
    remove bad utterances and duplicates
:param clustered_items: List, dialogue pairs with same user intents
:return:
"""
ret = {}
for item in clustered_items:
user_temp = item['user_temp']
user_say = item['user_say']
if len(user_temp) <= 7 and ('thank' in user_temp or 'thanks' in user_temp):
            # abandon meaningless utterances such as bare thanks
continue
if (user_say not in ret) or (item['agent_say'] and len(ret[user_say]['agent_say']) < len(item['agent_say'])):
# when there is a duplicate, keep the one with higher reply quality
ret[user_say] = item
return ret.values()
def data_stat(all_data):
    # TODO: dataset statistics are not implemented yet
    return []
def cluster_to_pairs(cluster_item):
"""
Construct source, target pairs from cluster
:param cluster_item: List, dialogue pairs with same user intents
:return:
"""
all_utterance = [item["user_temp"] for item in cluster_item]
all_combination = []
for comb in combinations(all_utterance, 2):
all_combination.append(comb) # use combination to avoid self to self pairs
all_combination.append(comb[::-1]) # to get reverse of it
return all_combination
def prepare_data(config):
"""
This prepare the data with following steps
Step1: Tokenizing
Step2: Filter the meaningless data
Step3: Remove duplicate
Step4: Build word table
Step5: Generate pair wise Target & Source
:param config: config data
:return:
"""
raw_data_file_lst = os.listdir(config['path']['ClusteringResult'])
for f in raw_data_file_lst:
with open(config['path']['ClusteringResult'] + f, 'r') as reader:
json_data = json.load(reader)
all_src_tgt_pairs = []
file_mark = f.replace('.json', '')
for key in json_data:
# print('======== debug ========', type(json_data), f)
tokenized_cluster = utterance_tokenize(json_data[key])
filtered_cluster = utterance_filter(tokenized_cluster)
all_src_tgt_pairs.extend(cluster_to_pairs(filtered_cluster))
# === build word table ===
current_word_table = WordTable(name=file_mark)
for pairs in all_src_tgt_pairs:
current_word_table.add_sentence(pairs[0])
            # adding the second sentence is unnecessary: every pair also appears reversed,
            # so its words are already covered
            # current_word_table.add_sentence(pairs[1])
        # === Export to files, to avoid re-computing every time ===
output_dir = config['path']['GenerationResult']
with open(output_dir + file_mark + '_pairs.json', 'w') as writer:
json.dump(all_src_tgt_pairs, writer)
with open(output_dir + file_mark + '_word-table.pickle', 'wb') as writer:
pickle.dump(current_word_table, writer)
| 4,127 | 33.689076 | 117 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Evaluation.py | # coding: utf-8
######################################################################
# Evaluation
# ==========
#
# Evaluation is mostly the same as training, but there are no targets so
# we simply feed the decoder's predictions back to itself for each step.
# Every time it predicts a word we add it to the output string, and if it
# predicts the EOS token we stop there. We also store the decoder's
# attention outputs for display later.
#
import random
import torch
from torch.autograd import Variable
from source.AuxiliaryTools.nn_tool import variable_from_sentence
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
def evaluate(encoder, decoder, sentence, input_word_table, output_word_table, max_length, use_cuda):
input_variable = variable_from_sentence(input_word_table, sentence)
input_length = input_variable.size()[0]
encoder_hidden = encoder.initHidden()
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei],
encoder_hidden)
encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]])) # SOS
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
decoded_words = []
decoder_attentions = torch.zeros(max_length, max_length)
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
decoder_attentions[di] = decoder_attention.data
        topv, topi = decoder_output.data.topk(1)
        # topi has shape (1, 1); take the index of the most likely token
        ni = topi[0][0]
if ni == EOS_token:
decoded_words.append('<EOS>')
break
else:
decoded_words.append(output_word_table.index2word[ni])
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
return decoded_words, decoder_attentions[:di + 1]
######################################################################
# We can evaluate random sentences from the training set and print out the
# input, target, and output to make some subjective quality judgements:
#
def evaluate_randomly(all_pairs, encoder, decoder, input_word_table, output_word_table, max_length, use_cuda, n=10):
for i in range(n):
pair = random.choice(all_pairs)
print('>', pair[0])
print('=', pair[1])
output_words, attentions = evaluate(encoder, decoder, pair[0],
input_word_table, output_word_table, max_length, use_cuda)
output_sentence = ' '.join(output_words)
print('<', output_sentence)
print('')
######################################################################
# For a better viewing experience we will do the extra work of adding axes
# and labels:
#
def show_attention(input_sentence, output_words, attentions):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' ') +
['<EOS>'], rotation=90)
ax.set_yticklabels([''] + output_words)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def evaluate_and_show_attention(input_sentence, encoder, attn_decoder, input_word_table, output_word_table):
output_words, attentions = evaluate(encoder, attn_decoder, input_sentence, input_word_table, output_word_table)
print('input =', input_sentence)
print('output =', ' '.join(output_words))
show_attention(input_sentence, output_words, attentions)
| 4,187 | 34.794872 | 116 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Seq2SeqModel.py | # coding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, use_cuda, n_layers=1):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.use_cuda = use_cuda
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
for i in range(self.n_layers):
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, use_cuda, n_layers=1):
super(DecoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.use_cuda = use_cuda
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax()
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, max_length, use_cuda, n_layers=1, dropout_p=0.1):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout_p = dropout_p
self.max_length = max_length
self.use_cuda = use_cuda
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_output, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)))
# print('debug', embedded.size(), attn_weights.unsqueeze(0).size(), encoder_outputs.unsqueeze(0).size())
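        # shapes (for reference): attn_weights (1, max_length) -> (1, 1, max_length) after unsqueeze;
        # encoder_outputs (max_length, hidden) -> (1, max_length, hidden); bmm yields (1, 1, hidden)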
attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]))
return output, hidden, attn_weights
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
| 3,661 | 34.901961 | 112 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/Training.py | import time
import math
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
from source.AuxiliaryTools.nn_tool import show_plot, variables_from_pair
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion,
max_length, use_cuda):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_variable.size()[0]
target_length = target_variable.size()[0]
encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
encoder_outputs = encoder_outputs.cuda() if use_cuda else encoder_outputs
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0][0]
decoder_input = Variable(torch.LongTensor([[SOS_token]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
loss += criterion(decoder_output, target_variable[di])
decoder_input = target_variable[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_output, encoder_outputs)
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
decoder_input = Variable(torch.LongTensor([[ni]]))
decoder_input = decoder_input.cuda() if use_cuda else decoder_input
loss += criterion(decoder_output, target_variable[di])
if ni == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.data[0] / target_length
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def train_iters(encoder, decoder, n_iters, pairs, input_word_table, output_word_table, max_length, use_cuda,
print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [variables_from_pair(random.choice(pairs), input_word_table, output_word_table)
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0].cuda() if use_cuda else training_pair[0]
target_variable = training_pair[1].cuda() if use_cuda else training_pair[1]
loss = train(input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion, max_length, use_cuda)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (time_since(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
# show_plot(plot_losses)
| 4,278 | 33.788618 | 109 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Generation/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/ReFilling/re_filling.py | # coding:utf-8
import re
import json
from source.AuxiliaryTools.nlp_tool import sentence_edit_distance
import random
from multiprocessing import Process, Queue, current_process, freeze_support, Manager
import copy
N_THREAD = 20
TASK_SIZE = 500
CONTEXT_WINDOW_SIZE = 2
FULL_SLOT_TABLE_AVAILABLE = True
CONTEXT_REFILL_RATE = 0.5  # refill slots via context match at this rate
DEBUG = False
def old_candidate_num(d):
ret = 0
for value in d.values():
ret += len(value)
return ret
def candidate_num(d):
return len(d)
def old_re_fill_sentences(lines, temp_query_dict, full_query_dict):
"""
    Step1: exact match context (temp) in temp_query_dict; if matched, refill randomly and go to Step3
    Step2: find the most similar context (temp), and refill with its slot values
    Step3: refill the remaining slots with full_query_dict
    :param lines: list of tuples (template sentence, line_id)
    :param temp_query_dict: a dict for query; the key is a temp and the value is its possible slot candidates
    :param full_query_dict: a dict for query; contains all slot name - possible slot value pairs
    :return: sentences after surface realization
"""
res_lst = []
r1, r2, r3 = 0, 0, 0
for line, line_id in lines:
tmp_res = line
# Step1
if line in temp_query_dict:
for slot_name in temp_query_dict[line]:
select_slot_value = random.choice(temp_query_dict[line][slot_name])
tmp_res = tmp_res.replace('<' + slot_name + '>', select_slot_value)
r1 += 1
# Step2
else:
target_temp_word_lst = line.split()
most_similar_temp = ""
min_distance = len(target_temp_word_lst)
for temp in temp_query_dict:
current_distance = sentence_edit_distance(temp, target_temp_word_lst)
if current_distance < min_distance:
min_distance = current_distance
most_similar_temp = temp
elif current_distance == min_distance and most_similar_temp \
and old_candidate_num(temp_query_dict[temp]) > old_candidate_num(temp_query_dict[most_similar_temp]):
min_distance = current_distance
most_similar_temp = temp
# print('Debug', min_distance, most_similar_temp, "|||", target_temp_word_lst)
if most_similar_temp: # Fill with the slots within most similar temp
for slot_name in temp_query_dict[most_similar_temp]:
select_slot_value = random.choice(temp_query_dict[most_similar_temp][slot_name])
tmp_res = tmp_res.replace('<' + slot_name + '>', select_slot_value)
r2 += 1
# Step3
slot_name_lst = re.findall("<(.*?)>", tmp_res)
if slot_name_lst:
for slot_name in slot_name_lst:
select_slot_value = random.choice(full_query_dict[slot_name])
tmp_res = tmp_res.replace("<" + slot_name + ">", select_slot_value)
r3 += 1
# recheck
if re.findall("<.*?>", tmp_res):
print("Error; unfinished re-filling!", tmp_res, '2333333333')
res_lst.append((tmp_res, line_id))
return res_lst, r1, r2, r3
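# Hedged sketch (added; the values below are invented for illustration) of the
# dict shapes old_re_fill_sentences expects, inferred from the lookups above:
def _example_old_query_dicts():
    # temp_query_dict: template string -> {slot_name: [candidate values]}
    temp_query_dict = {
        'drive me to the <poi_type>': {'poi_type': ['gas station', 'coffee shop']},
    }
    # full_query_dict: slot_name -> every value ever seen for that slot
    full_query_dict = {'poi_type': ['gas station', 'coffee shop', 'hospital']}
    return temp_query_dict, full_query_dict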
def extract_slot_context(word_lst):
"""
    Return a slot list and a context list aligned with word_lst.
    :param word_lst: tokenized template; slot tokens look like <slot_name>
    :return: (slot_lst, context_lst); non-slot positions hold None, slot positions hold
             the slot token and its +/- CONTEXT_WINDOW_SIZE word context window
"""
slot_lst = []
context_lst = []
for ind, word in enumerate(word_lst):
        if '<' in word and '>' in word:
slot_lst.append(word)
context_text_word_lst = word_lst[max(ind - CONTEXT_WINDOW_SIZE, 0): ind + CONTEXT_WINDOW_SIZE + 1]
context_lst.append(context_text_word_lst)
else:
slot_lst.append(None)
context_lst.append(None)
return slot_lst, context_lst
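# Worked example (added; assumes CONTEXT_WINDOW_SIZE == 2 as set above):
#   extract_slot_context(['set', 'gps', 'to', '<poi>', 'now'])
# returns
#   slot_lst    = [None, None, None, '<poi>', None]
#   context_lst = [None, None, None, ['gps', 'to', '<poi>', 'now'], None]
# i.e. each slot keeps up to CONTEXT_WINDOW_SIZE words on either side, slot included.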
def expand_tmp_res_and_get_label_lst(tmp_res, slot_lst):
final_res = []
label_lst = []
for tmp_res_word, slot_word in zip(tmp_res, slot_lst):
if type(tmp_res_word) == str:
final_res.append(tmp_res_word)
label_lst.append('O')
elif type(tmp_res_word) == list:
slot_name = slot_word.replace('<', '').replace('>', '')
final_res.extend(tmp_res_word)
label_lst.append('B-' + slot_name)
for i_value in tmp_res_word[1:]:
label_lst.append('I-' + slot_name)
else:
raise TypeError
return final_res, label_lst
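# Worked example (added): after refilling, slot positions hold word lists while
# plain positions stay strings, so
#   expand_tmp_res_and_get_label_lst(['drive', ['palo', 'alto'], 'please'],
#                                    [None, '<poi>', None])
# returns
#   (['drive', 'palo', 'alto', 'please'], ['O', 'B-poi', 'I-poi', 'O'])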
def re_fill_sentences(lines, context_query_dict, full_query_dict, refill_only, full_slot_table=FULL_SLOT_TABLE_AVAILABLE):
"""
    Step 1: exact-match each slot's context window in context_query_dict; if matched, refill randomly
    Step 2: otherwise find the most similar context for that slot and refill from its candidates
    Step 3: refill any remaining slots with full_query_dict
    :param lines: list of tuples (template sentence, line_id)
    :param context_query_dict: query dict; context_query_dict[slot_name][context_text] lists candidate values
    :param full_query_dict: query dict mapping every slot name to all its possible values
    :param refill_only: if True, strip index tags such as <1> from the input before refilling
    :return: list of (word_lst, line_id, label_lst) tuples after surface realization, plus counters for the three cases
"""
res_lst = []
r1, r2, r3 = 0, 0, 0
for line, line_id in lines:
debug_lst = []
if refill_only:
            line = re.sub(r'<\d+>', '', line)
word_lst = line.split()
tmp_res = word_lst
slot_lst, context_lst = extract_slot_context(word_lst)
for ind in range(len(slot_lst)):
slot_word = slot_lst[ind]
if slot_word and slot_word != '<unk>':
context_text_word_lst = context_lst[ind]
slot_name = slot_word.replace('<', '').replace('>', '')
context_text = ' '.join(context_text_word_lst)
                # check whether this is a real slot, randomly decide to refill via context, and whether the slot name is known
if slot_name and random.random() <= CONTEXT_REFILL_RATE and slot_name in context_query_dict:
# Step1
if context_text in context_query_dict[slot_name]:
select_slot_value = random.choice(context_query_dict[slot_name][context_text])
# select_slot_value is str
tmp_res[ind] = select_slot_value.split()
debug_lst.append([1, slot_word, select_slot_value, tmp_res])
r1 += 1
# Step2
else:
most_similar_temp = ""
min_distance = len(context_text_word_lst)
for candidate_context in context_query_dict[slot_name]:
current_distance = sentence_edit_distance(candidate_context, context_text_word_lst)
if current_distance < min_distance:
min_distance = current_distance
most_similar_temp = candidate_context
# select the candidate with more possible values
elif current_distance == min_distance and most_similar_temp \
and candidate_num(context_query_dict[slot_name][candidate_context]) > \
candidate_num(context_query_dict[slot_name][most_similar_temp]):
min_distance = current_distance
most_similar_temp = candidate_context
if most_similar_temp: # Fill with the slots within most similar temp
select_slot_value = random.choice(context_query_dict[slot_name][most_similar_temp])
# select_slot_value is str
tmp_res[ind] = select_slot_value.split()
debug_lst.append([2, slot_word, select_slot_value, tmp_res])
r2 += 1
# Step3
tmp_res_copy = copy.deepcopy(tmp_res)
for ind, slot_word in enumerate(tmp_res_copy):
            if type(slot_word) == str and '<' in slot_word and '>' in slot_word and slot_word != '<unk>':
slot_name = slot_word.replace('<', '').replace('>', '')
try:
select_slot_value = random.choice(full_query_dict[slot_name])
tmp_res[ind] = select_slot_value.split()
debug_lst.append([3, "<" + slot_name + ">", select_slot_value, tmp_res])
r3 += 1
except KeyError:
print('================Key Warning \nslot_lst:', slot_word, '\nline:', line, '\ntmp_res:', tmp_res, '\nslot_lst', slot_lst, '\ncontext_lst', context_lst)
# print('================\nslot_name_lst:', slot_name_lst, '\nline:', line, '\ntmp_res:', tmp_res, '\nslot_lst',
# slot_lst, '\ncontext_lst', context_lst)
# recheck
tmp_res_copy = copy.deepcopy(tmp_res)
for ind, x in enumerate(tmp_res_copy):
            if type(x) == str and '<' in x and '>' in x and x != '<unk>':
try: # to capture the weird <1> appear problem
int(x.replace('<', '').replace('>', ''))
tmp_res[ind] = '<unk>'
except ValueError:
print("Error; unfinished re-filling!", tmp_res, '!!!!!!!!!!!!!!!!!!!!!!')
# missed_set = list(filter(lambda x: '<' in x and '>' in x and x != '<unk>' and type(x) == str, tmp_res))
# if missed_set:
# print("Error; unfinished re-filling!", missed_set, tmp_res, '!!!!!!!!!!!!!!!!!!!!!!')
if DEBUG:
for db in debug_lst:
print(db)
final_res, label_lst = expand_tmp_res_and_get_label_lst(tmp_res, slot_lst)
res_lst.append((final_res, line_id, label_lst))
return res_lst, r1, r2, r3
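# Hedged sketch (added; values invented) of context_query_dict's shape as used in
# Steps 1-2 above: slot name -> context-window text -> candidate slot values.
def _example_context_query_dict():
    return {
        'poi_type': {
            'me to the <poi_type> please': ['gas station', 'coffee shop'],
        },
    }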
def re_filling_thread(task_queue, done_queue):
for param in iter(task_queue.get, 'STOP'):
        ret = re_fill_sentences(**param)
done_queue.put(ret)
def re_filling(config, task, target_file_name='navigate1_pred.txt', split_rate=1, slot_value_table='train', refill_only=False):
# print('?????', task, target_file_name, split_rate, slot_value_table, refill_only)
result_dir = config['path']['OnmtData'] + 'Result/'
if not refill_only:
input_dir = config['path']['OnmtData'] + 'Result/'
else:
input_dir = config['path']['OnmtData'] + 'SourceData/'
target_file_path = input_dir + target_file_name
result_file_path = result_dir + target_file_name.replace('.txt', '_refilled.txt')
for_conll_file_path = result_dir + target_file_name.replace('.txt', '_for-conll.json')
dict_dir = config['path']['ClusteringResult']
temp_query_path = dict_dir + task + "_temp-query.dict"
context_query_path = dict_dir + task + str(split_rate) + "_context-query.dict"
if slot_value_table == 'full':
full_query_path = dict_dir + task + "_full-query.dict"
# print('=!!!!!!!!!!!!! full_query_path', full_query_path)
elif slot_value_table == 'train':
full_query_path = dict_dir + task + str(split_rate) + "_train_full-query.dict"
else:
print('Error: Wrong setting for slot value table, only train and full are supported')
raise RuntimeError
all_results = []
# re-filling case statistic
all_r1, all_r2, all_r3 = 0, 0, 0
with open(temp_query_path, 'r') as temp_query_file, \
open(full_query_path) as full_query_file, \
open(context_query_path) as context_query_file:
temp_query_dict = json.load(temp_query_file)
full_query_dict = json.load(full_query_file)
context_query_dict = json.load(context_query_file)
print("stat re-filling for %s" % target_file_path)
# debug_line = 'give me directions to my <poi_type> food the <traffic_info> .'
# print(re_fill_sentence(debug_line.replace('\n', ''), temp_query_dict, full_query_dict))
task_queue, done_queue, task_n = Queue(), Queue(), 0
with open(target_file_path, 'r') as reader, open(result_file_path, 'w') as writer, \
open(for_conll_file_path, 'w') as for_conll_file:
line_count = 0
lines = []
all_sent_set = set()
for line in reader:
# if '_ni' in target_file_name and line in all_sent_set:
if ('_ni' in target_file_name or '_nf' in target_file_name) and line in all_sent_set:
# remove duplicate for specific task
continue
else:
all_sent_set.add(line)
lines.append((line.replace('\n', ''), line_count))
if line_count % TASK_SIZE == 0:
param = {
'lines': lines,
'context_query_dict': context_query_dict,
'full_query_dict': full_query_dict,
'refill_only': refill_only
}
# param = {
# 'lines': lines,
# 'temp_query_dict': temp_query_dict,
# 'full_query_dict': full_query_dict
# }
task_queue.put(param)
task_n += 1
lines = []
line_count += 1
# to collect the left data
param = {
'lines': lines,
'context_query_dict': context_query_dict,
'full_query_dict': full_query_dict,
'refill_only': refill_only
}
# param = {
# 'lines': lines,
# 'temp_query_dict': temp_query_dict,
# 'full_query_dict': full_query_dict
# }
task_queue.put(param)
task_n += 1
print("Start multi-thread Processing")
for t in range(N_THREAD):
task_queue.put('STOP')
for t in range(N_THREAD):
Process(target=re_filling_thread, args=(task_queue, done_queue)).start()
print("All threads created")
# collect the results below
for t in range(task_n):
refilled_res_lst, r1, r2, r3 = done_queue.get()
# calculate result to utilize python's parallel loading feature
all_results.extend(refilled_res_lst)
all_r1 += r1
all_r2 += r2
all_r3 += r3
if t * TASK_SIZE % 10000 == 0:
print(t * TASK_SIZE, 'lines processed')
print('Filling finished, three re-filling case statistic as follow:')
print('find ori:%d, most same:%d, leaked slots%d' % (all_r1, all_r2, all_r3))
sorted_result = sorted(all_results, key=lambda x: x[1])
for res in sorted_result:
writer.write(' '.join(res[0]) + '\n')
json_for_conll = []
for res in sorted_result:
json_for_conll.append(
{
'word_lst': res[0],
'label_lst': res[2]
}
)
json.dump(json_for_conll, for_conll_file)
| 14,997 | 44.72561 | 173 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/ReFilling/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Evaluate/gen_eval.py | import json
import argparse
import random
import re
# from nlp_tool import sentence_edit_distance # this is worked out of pycharm
from source.AuxiliaryTools.nlp_tool import sentence_edit_distance
# # ==== load config =====
# with open('../../config.json', 'r') as con_f:
# CONFIG = json.load(con_f)
#
#
# parser = argparse.ArgumentParser()
# tn = 'navigate_labeled'
# # cm = 'intent-slot'
# cm = 'slot'
# parser.add_argument("--appr_check", help="check gen res's appearance within one file from another file", action="store_true")
# parser.add_argument("--appr_check_in", help="specific the file to check in", type=str,
# default=CONFIG['path']["OnmtData"] + f"SourceData/train_{tn}_{cm}1_src.txt")
# parser.add_argument("--appr_check_target", help="specific the file containing utterance to check", type=str,
# default=CONFIG['path']["OnmtData"] + f"Result/{tn}_{cm}1_pred.txt")
#
# parser.add_argument("--top_x", help="specific top x in checking appr", type=int, default=10)
# args = parser.parse_args()
def find_closest_sent(tgt_s, sent_set):
closest_s = ''
min_distance = len(tgt_s.split())
for s in sent_set:
tmp_d = sentence_edit_distance(tgt_s, s)
if tmp_d < min_distance:
closest_s = s
min_distance = tmp_d
return closest_s, min_distance
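# Usage sketch (added; toy data): returns the candidate with the smallest
# token-level edit distance to the target, e.g.
#   find_closest_sent('book a flight', {'book a hotel', 'cancel it'})
# -> ('book a hotel', 1)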
def get_1_2_gram_set(line):
word_lst = line.split()
bi_gram_set = set()
uni_gram_set = set(word_lst)
for ind, word in enumerate(word_lst):
if ind + 1 < len(word_lst):
temp_bi_gram = word_lst[ind] + ' ' + word_lst[ind + 1]
bi_gram_set.add(temp_bi_gram)
return word_lst, uni_gram_set, bi_gram_set
def get_distinct_score(target_set):
all_uni_gram_set = set()
all_bi_gram_set = set()
total_word_num = 0
for line in target_set:
word_lst, uni_gram_set, bi_gram_set = get_1_2_gram_set(line)
all_uni_gram_set = all_uni_gram_set | uni_gram_set
all_bi_gram_set = all_bi_gram_set | bi_gram_set
total_word_num += len(word_lst)
distinct_1 = 100.0 * len(all_uni_gram_set) / total_word_num # convert to percent
distinct_2 = 100.0 * len(all_bi_gram_set) / total_word_num # convert to percent
return distinct_1, distinct_2, len(all_uni_gram_set), len(all_bi_gram_set), total_word_num
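# Worked example (added; toy corpus): for {'book a flight', 'book a hotel'} the
# unigram set is {book, a, flight, hotel} (4) and the bigram set is
# {'book a', 'a flight', 'a hotel'} (3) over 6 tokens in total, giving
# distinct-1 = 100 * 4 / 6 ~= 66.7 and distinct-2 = 100 * 3 / 6 = 50.0.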
def appearance_check(result_file, test_what_file, in_what_file, top_x=1):
total = 0
not_appeared = 0
unique_set = {}
unique = 0
unique_new = 0
try:
with open(test_what_file, 'r') as test_file, \
open(in_what_file, 'r') as source_file, \
open(result_file, 'w') as log_f:
source_lines = []
for line in source_file.readlines():
                source_lines.append(re.sub(r'\s<\d*>', '', line))
source_lines = set(source_lines)
test_lines = test_file.readlines()
line_count = 0
distance_sum = 0
length_sum = 0
# ========== min edit distance evaluation ==========
for line in test_lines:
if line_count % 10 < top_x:
if line not in unique_set:
if line not in source_lines:
                            not_appeared += 1
                            unique_new += 1  # the outer check already guarantees this line is unseen
tmp_closest, tmp_d = find_closest_sent(line, source_lines)
log_f.write("=== %d %d === \n\tgen: %s\n\tori: %s\n" % (
tmp_d, len(line.split()), line.replace('\n', ''), tmp_closest.replace('\n', '')
))
distance_sum += tmp_d
length_sum += len(line.split())
unique += 1
unique_set[line] = True
total += 1
line_count += 1
# ========= distinct evaluation ===========
test_new_lines = set(unique_set.keys())
# augmented_data = test_new_lines
augmented_data = source_lines | test_new_lines
augmented_distinct_1, augmented_distinct_2, augmented_unigram, augmented_bigram, augmented_total_word = get_distinct_score(augmented_data)
source_distinct_1, source_distinct_2, source_unigram, source_bigram, source_total_word = get_distinct_score(source_lines)
# ======== edit distance evaluation on source ========
source_distance_sum = 0
for line in source_lines:
temp_source_lines = source_lines.copy()
temp_source_lines.remove(line)
tmp_closest, tmp_d = find_closest_sent(line, temp_source_lines)
source_distance_sum += tmp_d
ave_source_d = source_distance_sum / len(source_lines)
# ======== edit edit distance evaluation on augmented ==========
augmented_distance_sum = 0
for line in augmented_data:
temp_augmented_data = augmented_data.copy()
temp_augmented_data.remove(line)
tmp_closest, tmp_d = find_closest_sent(line, temp_augmented_data)
augmented_distance_sum += tmp_d
ave_augmented_d = augmented_distance_sum / len(augmented_data)
ave_d = 0 if unique_new == 0 else distance_sum / unique_new
ave_l = 0 if unique_new == 0 else length_sum / unique_new
eval_results = {
"Not Appeared": not_appeared,
"Total": total,
"Unique": unique,
"Unique New": unique_new,
"Avg. Distance for new": ave_d,
"Avg. Distance for augmented": ave_augmented_d,
"Avg. Distance for source": ave_source_d,
'Avg. Length': ave_l,
'source_distinct_1': source_distinct_1,
'source_distinct_2': source_distinct_2,
'source_unigram': source_unigram,
'source_bigram': source_bigram,
'source_total_word': source_total_word,
'augmented_distinct_1': augmented_distinct_1,
'augmented_distinct_2': augmented_distinct_2,
'augmented_unigram': augmented_unigram,
'augmented_bigram': augmented_bigram,
'augmented_total_word': augmented_total_word,
'source_size': len(source_lines),
'generated_new_size': len(test_new_lines),
'augmented_size': len(augmented_data),
}
return eval_results
except FileNotFoundError as e:
return {'no_file': e}
# return {'no_file': (result_file, test_what_file, in_what_file)}
# if __name__ == "__main__":
# if args.appr_check and args.appr_check_target and args.appr_check_in:
# appearance_check(result_file_path, args.appr_check_target, args.appr_check_in, top_x=args.top_x)
| 6,950 | 40.622754 | 150 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Evaluate/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/Evaluate/slot_filling.py | # coding:utf-8
"""
Tips:
slot label format:
B-slot_name, there is no < or > in slot_name
"""
import json
import os
from source.AuxiliaryTools.nlp_tool import low_case_tokenizer
import re
# VERBOSE = True
VERBOSE = False
# DEBUG = False
DEBUG = True
SENT_COUNT_SPLIT = True
# def out_of_slot(i, j, tmp_word_lst, ori_word_lst):
# """
# to check if pointer of ori is out of the slot area
# :param i: index of the current slot
# :param j:
# :param tmp_word_lst:
# :param ori_word_lst:
# :return:
# """
# # two list keep same until next slot
# try:
# next_slot_id_inc = [('<' in x and '>' in x) for x in tmp_word_lst[i + 1:]].index(True)
# inner_remained_tmp = tmp_word_lst[i + 1: i + 1 + next_slot_id_inc]
# inner_remained_ori = ori_word_lst[j:j + next_slot_id_inc]
# return inner_remained_ori == inner_remained_tmp
# except:
# # There is no next slot
# remained_tmp = tmp_word_lst[i + 1:] # if i + 1 exceed length, they must both be []
# remained_ori = ori_word_lst[j:]
# return remained_tmp == remained_ori
# def get_label_from_temp(user_temp, user_say, verbose=VERBOSE):
# label_lst = []
# tmp_word_lst = low_case_tokenizer(user_temp)
# ori_word_lst = low_case_tokenizer(user_say)
# i = 0
# j = 0
# while i < len(tmp_word_lst) and j < len(ori_word_lst):
# tmp_word = tmp_word_lst[i]
# slot_name = re.findall("<(.*?)>", tmp_word)
# if slot_name:
# # find a slot, align the slot word
# slot_name = slot_name[0]
# label_lst.append('B-' + slot_name)
# j += 1
# while not out_of_slot(i, j, tmp_word_lst, ori_word_lst) and j <= len(ori_word_lst):
# # print('i, j, tmp_word_lst, ori_word_lst', i, j, tmp_word_lst, ori_word_lst, out_of_slot(i, j, tmp_word_lst, ori_word_lst))
# label_lst.append('I-' + slot_name)
# j += 1
# j -= 1
# else:
# if DEBUG and verbose:
# print('debug', 'i', i, 'j', j, '\n', ori_word_lst, '\n', tmp_word_lst, '\n', label_lst)
# if ori_word_lst[j] == tmp_word_lst[i]:
# label_lst.append('O')
# else:
# print(ori_word_lst[j], "vs", tmp_word_lst[i])
# print()
# print("ERROR fail to match non-slot word!!!!!!!!!!!")
# raise RuntimeError
# j += 1
# i += 1
# if verbose:
# print('ori:', ori_word_lst)
# print('tmp:', tmp_word_lst)
# print('label:', label_lst)
# print('zip res:', list(zip(ori_word_lst, label_lst)))
# print('\n')
# if len(label_lst) != len(ori_word_lst):
# print("Error: not equal length between label and word list!")
# print(len(ori_word_lst), ori_word_lst)
# print(len(tmp_word_lst), tmp_word_lst)
# print(len(label_lst), label_lst)
# raise RuntimeError
# else:
# if label_lst and ori_word_lst:
# # Remove I-slot_name word in label lst and ori_word_lst, to equal the length with temp_word_lst
# compressed_label_lst, compressed_ori_word_lst = zip(*filter(
# lambda x: 'I-' not in x[1], zip(ori_word_lst, label_lst)
# ))
# else:
# compressed_label_lst, compressed_ori_word_lst = [], []
# for l, tw, cow in zip(compressed_label_lst, tmp_word_lst, compressed_label_lst):
# if l == 'O' and tw != cow:
# print("Error: label not aligned!")
# print(ori_word_lst)
# print(tmp_word_lst)
# print(label_lst)
# raise RuntimeError
# # Result check: to find not aligned error
# return label_lst, ori_word_lst
def get_slot_filling_data_from_cluster(cluster_data_file):
ret = []
all_user_say = set()
with open(cluster_data_file, 'r') as cluster_file:
cluster_data = json.load(cluster_file)
for turn_lst in cluster_data.values():
for turn in turn_lst:
user_say = turn['user_say']
user_temp = turn['user_temp']
if not user_temp:
continue # remove empty turn
all_user_say.add(user_say)
# label_lst, word_sequence = get_label_from_temp(user_temp, user_say)
data_item = {
"utters": {
"tags": turn['label_lst'],
"ws": turn['user_word_lst'],
}
}
ret.append(data_item)
# except RuntimeError:
# if DEBUG:
# print('==== debug ====', cluster_data_file, user_temp, user_say)
return ret, all_user_say
def get_slot_filling_data_from_generation(for_conll_file_path, ori_say_set, use_topx, refilled_only=False):
# def get_slot_filling_data_from_generation(gen_f_path, refill_f_path, ori_say_set, use_topx, refilled_only=False):
ret = []
# with open(gen_f_path, 'r') as gen_f, open(refill_f_path, 'r') as refill_f:
# all_user_temp = gen_f.readlines()
# all_user_say = refill_f.readlines()
# temp_say_pairs = set(zip(all_user_temp, all_user_say)) # remove dup
with open(for_conll_file_path) as for_conll_file:
json_for_conll = json.load(for_conll_file)
for ind, pair in enumerate(json_for_conll):
if ind % 10 >= use_topx:
continue
elif ' '.join(pair['word_lst']) in ori_say_set: # remove occurred refilled utterance
continue
try:
# user_say = pair[1]
# user_temp = re.sub('<\d+>', '', pair[0]) if refilled_only else pair[0] # remove id label such as <1>
# label_lst, word_sequence = get_label_from_temp(user_temp, user_say)
data_item = {
"utters": {
"tags": pair['label_lst'],
"ws": pair['word_lst']
}
}
if ind % 10000 == 0:
print(ind, 'pairs finished.')
ret.append(data_item)
except RuntimeError:
if DEBUG:
print('==== debug ====', for_conll_file_path)
# print('==== debug ====', gen_f_path)
return ret
def prepare_data_to_dukehan(config, task_name='navigate_labeled', split_rate=1, use_topx=10):
print('Processing data for: ', task_name, split_rate)
# define dir path
gen_result_dir = config['path']['OnmtData'] + 'Result/'
cluster_result_dir = config['path']['ClusteringResult']
output_data_dir = config['path']['Evaluate'] + 'SlotFilling/'
if not os.path.isdir(output_data_dir):
os.makedirs(output_data_dir)
# define input file path
refilled_data_path = gen_result_dir + task_name + str(split_rate) + '_pred_refilled.txt'
gen_data_path = gen_result_dir + task_name + str(split_rate) + '_pred.txt'
train_cluster_result_path = cluster_result_dir + 'train_%s%s.json' % (task_name, str(split_rate))
test_cluster_result_path = cluster_result_dir + 'test_%s1.0.json' % task_name
dev_cluster_result_path = cluster_result_dir + 'dev_%s1.0.json' % task_name
all_slot_value_dict = cluster_result_dir + '%s_full-query.dict' % task_name
# define output file path
train_path = output_data_dir + 'train' + str(split_rate) + '.json'
dev_path = output_data_dir + 'dev.json'
test_path = output_data_dir + 'test.json'
extend_train_path = output_data_dir + 'extend_train' + str(split_rate) + '.json'
# get data label pair from cluster result
result_for_train, all_train_user_say = get_slot_filling_data_from_cluster(train_cluster_result_path)
result_for_test, _ = get_slot_filling_data_from_cluster(test_cluster_result_path)
result_for_dev, _ = get_slot_filling_data_from_cluster(dev_cluster_result_path)
# print('debug: all user', len(all_train_user_say))
    # get extra data from generation; re_filling() stores the refilled pairs as a for-conll JSON next to the predictions
    gen_for_conll_path = gen_data_path.replace('.txt', '_for-conll.json')
    result_for_extend_train = get_slot_filling_data_from_generation(gen_for_conll_path, all_train_user_say, use_topx=use_topx)
# get all slot set
all_slot_label = ['O']
with open(all_slot_value_dict, 'r') as reader:
all_slot_set = json.load(reader).keys()
for slot_name in all_slot_set:
all_slot_label.append('B-' + slot_name)
all_slot_label.append('I-' + slot_name)
print('debug', len(result_for_extend_train))
# output to file
with open(train_path, 'w') as train_res_f, \
open(dev_path, 'w') as dev_res_f, \
open(test_path, 'w') as test_res_f, \
open(extend_train_path, 'w') as extend_train_res_f:
train_res = {
'tags': all_slot_label,
'data': result_for_train
}
dev_res = {
'tags': all_slot_label,
'data': result_for_dev
}
test_res = {
'tags': all_slot_label,
'data': result_for_test
}
extend_train_res = {
'tags': all_slot_label,
'data': result_for_extend_train
}
json.dump(train_res, train_res_f)
json.dump(dev_res, dev_res_f)
json.dump(test_res, test_res_f)
json.dump(extend_train_res, extend_train_res_f)
def format_and_output_conll_data(file_path, results):
print('Out put to', file_path)
with open(file_path, 'w') as writer:
for data_item in results:
tag_lst = data_item['utters']['tags']
ws_lst = data_item['utters']['ws']
for tag, ws in zip(tag_lst, ws_lst):
writer.write('%s\t%s\n' % (ws, tag))
writer.write('\n')
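# Output format sketch (added): one "word<TAB>tag" pair per line, with a blank
# line separating utterances, e.g.
#   drive   O
#   to      O
#   palo    B-poi
#   alto    I-poi
#
# (blank line, then the next utterance)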
def remove_result_duplication(results):
deduplicated_result = []
appeared_user_say = set()
for data_item in results:
user_utterance = ' '.join(data_item['utters']['ws'])
if user_utterance not in appeared_user_say:
appeared_user_say.add(user_utterance)
deduplicated_result.append(data_item)
return deduplicated_result
def prepare_data_to_conll_format(config, task_name='navigate', split_rate=1, cluster_method='_intent', use_topx=10, refilled_only=False, pair_mod='', no_index='', no_filter_str=''):
print('Processing data for: ', task_name + cluster_method, split_rate)
# define dir path
gen_result_dir = config['path']['OnmtData'] + 'Result/'
gen_source_dir = config['path']['OnmtData'] + 'SourceData/'
cluster_result_dir = config['path']['ClusteringResult']
output_data_dir = config['path']['Evaluate'] + 'SlotFilling/Source/'
if not os.path.isdir(output_data_dir):
os.makedirs(output_data_dir)
# define input file path
# refilled_data_path = gen_result_dir + task_name + cluster_method + str(split_rate) + '_pred_refilled.txt'
# gen_data_path = gen_result_dir + task_name + cluster_method + str(split_rate) + '_pred.txt'
    gen_for_conll_file_path = gen_result_dir + task_name + cluster_method + str(split_rate) + pair_mod + no_index + no_filter_str + '_pred_for-conll.json'
# rfo_refilled_data_path = gen_result_dir + 'train_' + task_name + cluster_method + str(split_rate) + '_src_refilled.txt'
# rfo_gen_data_path = gen_source_dir + 'train_' + task_name + cluster_method + str(split_rate) + '_src.txt'
rfo_for_conll_file_path = gen_result_dir + 'train_' + task_name + cluster_method + str(split_rate) + '_src_for-conll.json'
train_cluster_result_path = cluster_result_dir + 'train_%s%s%s.json' % (task_name, cluster_method, str(split_rate))
test_cluster_result_path = cluster_result_dir + 'test_%s%s1.json' % (task_name, cluster_method)
dev_cluster_result_path = cluster_result_dir + 'dev_%s%s1.json' % (task_name, cluster_method)
# all_slot_value_dict = cluster_result_dir + '%s_full-query.dict' % task_name
# define output file path
if not refilled_only:
train_path = output_data_dir + 'train_' + task_name + cluster_method + str(split_rate) + '.conll'
extend_train_path = output_data_dir + 'extend_train_' + task_name + cluster_method + str(split_rate) + pair_mod + no_index + no_filter_str + '.conll'
dev_path = output_data_dir + 'dev_' + task_name + pair_mod + no_index + no_filter_str + '.conll'
test_path = output_data_dir + 'test_' + task_name + pair_mod + no_index + no_filter_str + '.conll'
full_corpus_path = output_data_dir + 'full_corpus_' + task_name + pair_mod + no_index + no_filter_str + '.conll'
else:
train_path = output_data_dir + 'train_' + task_name + cluster_method + '_refill-only' + str(split_rate) + '.conll'
extend_train_path = output_data_dir + 'extend_train_' + task_name + cluster_method + '_refill-only' + str(split_rate) + '.conll'
dev_path = output_data_dir + 'dev_' + task_name + '_refill-only' + '.conll'
test_path = output_data_dir + 'test_' + task_name + '_refill-only' + '.conll'
full_corpus_path = output_data_dir + 'full_corpus_' + task_name + '_refill-only' + '.conll'
# get data label pair from cluster result
result_for_train, all_train_user_say = get_slot_filling_data_from_cluster(train_cluster_result_path)
result_for_test, _ = get_slot_filling_data_from_cluster(test_cluster_result_path)
result_for_dev, _ = get_slot_filling_data_from_cluster(dev_cluster_result_path)
# get extra data from generation (not include ori-train data)
result_for_gen_extra_train = get_slot_filling_data_from_generation(gen_for_conll_file_path, all_train_user_say, use_topx=use_topx, refilled_only=False)
# get extra data from source refilled (not include ori-train data), there is no beam search in rfo, use top 10
result_for_rof_extra_train = get_slot_filling_data_from_generation(rfo_for_conll_file_path, all_train_user_say, use_topx=10, refilled_only=True)
# merge to get extend train data
if not refilled_only:
# result_for_extend_train = result_for_train + result_for_gen_extra_train
result_for_extend_train = result_for_train + result_for_rof_extra_train + result_for_gen_extra_train
else:
result_for_extend_train = result_for_train + result_for_rof_extra_train
result_for_extend_train = remove_result_duplication(result_for_extend_train)
# output to file
format_and_output_conll_data(train_path, result_for_train)
format_and_output_conll_data(dev_path, result_for_dev)
format_and_output_conll_data(test_path, result_for_test)
format_and_output_conll_data(extend_train_path, result_for_extend_train)
# print('debug', len(result_for_extend_train))
# get and output full corpus data
full_data_mark = 4478 if SENT_COUNT_SPLIT else 1
if split_rate == full_data_mark:
print("Processing data for: full corpus")
result_for_full_corpus = result_for_train + result_for_dev + result_for_test
format_and_output_conll_data(full_corpus_path, result_for_full_corpus)
| 15,138 | 46.309375 | 182 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/eval_tool.py | # coding: utf-8
import json
import os
import re
LOG_DIR = '../../log/'
CONFIG_PATH = '../../config.json'
with open(CONFIG_PATH, 'r') as reader:
CONFIG = json.load(reader)
RANDOM_SEED = 100
# EXTEND_SETTING = ['extend_']
EXTEND_SETTING = ['extend_', '']
RFO_SETTING = ['_refill-only', '']
TASK_SETTING = ['atis_labeled']
# TASK_SETTING = ['navigate_labeled', 'schedule_labeled', 'weather_labeled']
RES_TABLE = {}
def collect_and_format_slot_filling_result_from_log():
all_log_file = os.listdir(LOG_DIR)
all_column_name = []
all_split_rate = []
for file_name in all_log_file:
if 'slot-filling' in file_name:
with open(f"{LOG_DIR}{file_name}", 'r') as reader:
split_rate = re.findall(r"[-+]?\d*\.\d+|\d+", file_name)[0]
column_name = file_name.replace('slot-filling', '').replace('log', '').replace('_labeled', '').replace(str(split_rate), '')
result = re.findall('slot_filling_bilstm.py : INFO test F1-value: (.*)\n', reader.read())[0]
if split_rate not in RES_TABLE:
RES_TABLE[split_rate] = {}
all_split_rate.append(split_rate)
RES_TABLE[split_rate][column_name] = result
if column_name not in all_column_name:
all_column_name.append(column_name)
all_column_name = ['split_rate'] + sorted(all_column_name)
all_split_rate = sorted(all_split_rate, key=lambda x: float(x))
output_rows = ['\t'.join(all_column_name)]
for sr in all_split_rate:
temp_row = [str(sr)]
for c in all_column_name[1:]:
temp_row.append(RES_TABLE[sr][c])
output_rows.append('\t'.join(temp_row))
with open(f"{LOG_DIR}slot_filling_result", 'w') as writer:
for line in output_rows:
writer.write(f'{line}\n')
def collect_and_format_slot_filling_result_from_prf():
prf_dir = CONFIG['path']['Evaluate'] + 'SlotFilling/prf/'
all_column_name = ['split_rate']
# all_split_rate = [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.08, 0.1, 0.2, 0.5, 1]
all_split_rate = CONFIG['experiment']['train_set_split_rate']
for split_rate in all_split_rate:
# for split_rate in :
if split_rate not in RES_TABLE:
RES_TABLE[split_rate] = {}
for task in TASK_SETTING:
for cluster_method in CONFIG['experiment']['cluster_method']:
# et
for et_str in EXTEND_SETTING:
# rfo
for rfo_str in RFO_SETTING:
if rfo_str and not et_str:
continue
prf_file_name = f'{et_str}{task}_weight2-lstm-RandomSeed-{RANDOM_SEED}{cluster_method}{rfo_str}{split_rate}_test'
column_name = f'{task}{cluster_method}{"-et" if et_str else ""}{"-rof" if rfo_str else ""}'
if column_name not in all_column_name:
all_column_name.append(column_name)
try:
with open(prf_dir + prf_file_name, 'r') as reader:
f_value = re.findall('accuracy:.*?; precision:.*?; recall:.*?; FB1:(.*?)\n', reader.read())[0].strip()
# print(66666666666666, prf_file_name, f_value)
except FileNotFoundError:
# print('No file:', prf_file_name)
f_value = 'N/A'
RES_TABLE[split_rate][column_name] = f_value
# print(split_rate, task, et_str, rfo_str)
output_rows = ['\t'.join(all_column_name)]
# print(all_column_name)
for sr in all_split_rate:
temp_row = [str(sr)]
for c in all_column_name[1:]:
temp_row.append(RES_TABLE[sr][c])
output_rows.append('\t'.join(temp_row))
with open(f"{LOG_DIR}slot_filling_result", 'w') as writer:
for line in output_rows:
writer.write(f'{line}\n')
def get_domain(ind):
if ind < 9:
return 'n'
if 9 <= ind < 18:
return 's'
if 18 <= ind:
return 'w'
def get_model(ind):
if ind % 3 == 0:
return 'r'
if ind % 3 == 1:
return 'f'
if ind % 3 == 2:
return 'o'
def get_average_from_all_result():
row_name = ["navigate_intent-slot", "navigate_labeled_intent-slot-et-rof", "navigate_labeled_intent-slot-et", "navigate_intent", "navigate_labeled_intent-et-rof", "navigate_labeled_intent-et", "navigate_slot", "navigate_labeled_slot-et-rof", "navigate_labeled_slot-et", "schedule_intent-slot", "schedule_labeled_intent-slot-et-rof", "schedule_labeled_intent-slot-et", "schedule_intent", "schedule_labeled_intent-et-rof", "schedule_labeled_intent-et", "schedule_slot", "schedule_labeled_slot-et-rof", "schedule_labeled_slot-et", "weather_intent-slot", "weather_labeled_intent-slot-et-rof", "weather_labeled_intent-slot-et", "weather_intent", "weather_labeled_intent-et-rof", "weather_labeled_intent-et", "weather_slot", "weather_labeled_slot-et-rof", "weather_labeled_slot-et"]
# paste results here
# rfo vs rf + gen | train table only
all_result = []
input_file = '/users4/ythou/Projects/TaskOrientedDialogue/code/DialogueDiversification/log/slot_filling_result'
result_file = '/users4/ythou/Projects/TaskOrientedDialogue/code/DialogueDiversification/log/merged_slot_filling_result'
with open(input_file, 'r') as reader:
all_lines = reader.readlines()
for line in all_lines[1:]:
one_line_res = []
for score in line.split('\t')[1:]:
one_line_res.append(float(score))
all_result.append(one_line_res)
merged_res = {
        'n': {'o': [], 'r': [], 'f': []},
        's': {'o': [], 'r': [], 'f': []},
        'w': {'o': [], 'r': [], 'f': []}
}
final_res = {
'n': {'o': [], 'r': [], 'f': []},
's': {'o': [], 'r': [], 'f': []},
'w': {'o': [], 'r': [], 'f': []}
}
for i, row in enumerate(all_result):
temp_res = {
'n': {'o': [], 'r': [], 'f': []},
's': {'o': [], 'r': [], 'f': []},
'w': {'o': [], 'r': [], 'f': []}
}
for j, c in enumerate(row):
model = get_model(j)
domain = get_domain(j)
temp_res[domain][model].append(c)
print('debug', row_name[j], domain, model, '\n')
for d in temp_res:
for m in temp_res[d]:
merged_res[d][m].append(temp_res[d][m])
with open(result_file, 'w') as writer:
line_str = ''
for d in merged_res:
for m in merged_res[d]:
line_str += '%s-%s\t' % (d, m)
print(line_str)
writer.write(line_str + '\n')
for r_n in range(len(all_result)):
line_str = ''
for d in merged_res:
for m in merged_res[d]:
if m == 'f':
line_str += '%.2f\t' % (max(merged_res[d][m][r_n]))
# line_str += '%.2f\t' % (sum(merged_res[d][m][r_n]) / 3)
else:
line_str += '%.2f\t' % (sum(merged_res[d][m][r_n]) / 3)
print(line_str)
writer.write(line_str + '\n')
print("notice ori-order")
if __name__ == '__main__':
# collect_and_format_slot_filling_result_from_log() # abandoned
collect_and_format_slot_filling_result_from_prf()
print('Notice :change setting for task and random et.al. before running if collect from prf')
print('Evaluating task', TASK_SETTING)
# get_average_from_all_result()
| 7,655 | 43.254335 | 780 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/ConfigTool.py | # coding: utf-8
import json
import os
from collections import OrderedDict
def create_dir(p_list):
new_folder_num = 0
for p in p_list:
if type(p) == dict:
new_folder_num += create_dir(p.values())
elif not os.path.isdir(p):
os.makedirs(p)
new_folder_num += 1
return new_folder_num
def update_config(data_root="/users4/ythou/Projects/TaskOrientedDialogue/data/", config_path="../../config.json"):
config = {
'path': {
"DataRoot": data_root,
"RawData": {
'stanford': data_root + "Stanford/",
'stanford_labeled': data_root + "StanfordLabeled/",
'atis': data_root + "Atis/",
},
"ClusteringResult": data_root + "ClusteringResult/",
"GenerationResult": data_root + "GenerationResult/",
"OnmtData": data_root + "OnmtData/",
"Evaluate": data_root + "Evaluate/",
"Embedding": data_root + 'Embedding/'
},
'onmt': {
'prepare_data': ['python3', './OpenNMT/preprocess.py', '-train_src', '<DATA_DIR>/train_<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>_src.txt', '-train_tgt', '<DATA_DIR>/train_<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>_tgt.txt', '-valid_src', '<DATA_DIR>/dev_<DATA_MARK><CLUSTER_METHOD>1<PAIR_MOD><NO_INDEX><NO_FILTERING>_src.txt', '-valid_tgt', '<DATA_DIR>/dev_<DATA_MARK><CLUSTER_METHOD>1<PAIR_MOD><NO_INDEX><NO_FILTERING>_tgt.txt', '-save_data', '<RESULT_DIR>/processed_<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>'],
'train': ['python3', './OpenNMT/train.py', '-data', '<RESULT_DIR>/processed_<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>', '-save_model', '<RESULT_DIR>/<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>-model', '<GPU>'],
'test': ['python3', './OpenNMT/translate.py', '-model', '<RESULT_DIR>/<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>-model.pt', '-src', '<DATA_DIR>/<EXPAND_TGT>_<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>_src.txt', '-output', '<RESULT_DIR>/<DATA_MARK><CLUSTER_METHOD><SPLIT_RATE><PAIR_MOD><NO_INDEX><NO_FILTERING>_pred.txt', '-replace_unk', '-verbose', '-n_best', '10', '<GPU>']
},
'gen_with_label': {
'prepare_data': ['python3', './OpenNMT/preprocess.py',
'-train_src', '<DATA_DIR>/train_<TRAIN_FILE_TAIL>_src.txt',
'-train_tgt', '<DATA_DIR>/train_<TRAIN_FILE_TAIL>_tgt.txt',
'-valid_src', '<DATA_DIR>/dev_<DEV_FILE_TAIL>_src.txt',
'-valid_tgt', '<DATA_DIR>/dev_<DEV_FILE_TAIL>_tgt.txt',
'-save_data', '<RESULT_DIR>/processed_<TRAIN_FILE_TAIL>'],
'train': ['python3', './OpenNMT/train.py',
'-data', '<RESULT_DIR>/processed_<TRAIN_FILE_TAIL>',
'-save_model', '<RESULT_DIR>/<TRAIN_FILE_TAIL>-model',
'<GPU>'],
'test': ['python3', './OpenNMT/translate.py',
'-model', '<RESULT_DIR>/<TRAIN_FILE_TAIL>-model.pt', '-src',
'<DATA_DIR>/train_<TRAIN_FILE_TAIL>_src.txt',
'-output', '<RESULT_DIR>/<TRAIN_FILE_TAIL>_pred.txt',
'-replace_unk', '-verbose', '-n_best', '5', '<GPU>']
},
'slot_filling': {
'train_and_test': ['python3', './source/Evaluate/slot_filling_bilstm.py', '-t', '<TASK_NAME>', '-s', '<SEED>', '-sr', '<SPLIT_RATE>', '-cm', '<CLUSTER_METHOD>', '<EXTEND>', '<REFILL_ONLY>']
},
'experiment': {
# 'train_set_split_rate': [515], # for ablation test
'train_set_split_rate': [129, 515, 4478],
# 'train_set_split_rate': [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.08, 0.1, 0.2],
# 'train_set_split_rate': [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.08, 0.1, 0.2, 0.5, 1],
# 'train_set_split_rate': [0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.08, 0.1, 0.2, 0.3, 0.4, 0.5, 0.8, 1],
'cluster_method': ['_intent-slot', '_intent', '_slot']
}
}
with open(config_path, 'w') as writer:
json.dump(config, writer, indent=2)
new_folder_num = 0
# create folder if not exist
if not os.path.isdir(config['path']['DataRoot']):
os.makedirs(config['path']['DataRoot'])
new_folder_num += 1
new_folder_num += create_dir(config['path'].values())
print('config updated, make %d new folder to fit config setting' % new_folder_num)
if __name__ == "__main__":
update_config("/users4/ythou/Projects/TaskOrientedDialogue/data/") # For my linux server setting
# update_config("E:/Projects/Research/TaskOrientedDialogue/data/") # For my windows setting
| 4,972 | 60.395062 | 619 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/data_split.py | # coding: utf-8
DATA_FORMAT = 'conll'
INPUT_FILE = '/users4/ythou/Projects/TaskOrientedDialogue/data/AtisRaw/atis.train'
OUTPUT_DIR = '/users4/ythou/Projects/TaskOrientedDialogue/data/Atis/'
DATA_MARK = 'atis'
TRAIN_DEV_RATE = [0.8, 0.2]
TRAIN_DEV_COUNT = [None, 500]
USE_RATE = False
def split():
with open(INPUT_FILE, 'r') as input_f, \
open(f'{OUTPUT_DIR}{DATA_MARK}_train', 'w') as train_f, \
open(f'{OUTPUT_DIR}{DATA_MARK}_dev', 'w') as dev_f:
if DATA_FORMAT == 'conll':
all_data = input_f.read().strip().split('\n\n')
if USE_RATE:
train_end_ind = int(len(all_data) * TRAIN_DEV_RATE[0])
else:
train_end_ind = len(all_data) - TRAIN_DEV_COUNT[1]
train_data = all_data[: train_end_ind]
dev_data = all_data[train_end_ind:]
print(f'train{train_end_ind}, dev{len(all_data)- train_end_ind}')
train_f.write('\n\n'.join(train_data))
dev_f.write('\n\n'.join(dev_data))
if __name__ == '__main__':
split()
| 1,072 | 36 | 82 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/slot_leak_check.py | # coding:utf-8
import json
import os
import re
LOG_DIR = '../../log/'
CONFIG_PATH = '../../config.json'
with open(CONFIG_PATH, 'r') as reader:
CONFIG = json.load(reader)
test_slot_file_path = CONFIG['path']["ClusteringResult"] + 'test_atis_labeled_intent-slot1.json'
origin_train_file_path = CONFIG['path']['RawData']['atis'] + 'atis_train'
for split_rate in [129, 4478]:
# for split_rate in CONFIG['experiment']['train_set_split_rate']:
target_generation_file_path = CONFIG['path']['OnmtData'] + 'Result/' + f'atis_labeled_intent-slot{split_rate}_pred_refilled.txt'
all_test_slot_value_word = set()
test_only_value_word = set()
new_slot_count = 0
ori_slot_count = 0
with open(test_slot_file_path, 'r') as reader:
test_json_data = json.load(reader)
for data_item_lst in test_json_data.values():
for data_item in data_item_lst:
for slot_value_word_lst in data_item['slot_value_lst']:
all_test_slot_value_word = all_test_slot_value_word | set(slot_value_word_lst)
with open(origin_train_file_path, 'r') as reader:
all_origin_text = reader.read().lower()
with open(target_generation_file_path, 'r') as reader:
print(target_generation_file_path)
all_generated_text = reader.read()
for word in all_test_slot_value_word:
if word not in all_origin_text:
print(word)
test_only_value_word.add(word)
print(len(test_only_value_word), 'new word in total', len(all_test_slot_value_word) - len(test_only_value_word), 'slot words in total')
for word in test_only_value_word:
if word in all_generated_text:
print(word, 'leaked!!!!!')
| 1,696 | 38.465116 | 139 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/nn_tool.py | # coding: utf-8
from __future__ import unicode_literals, print_function, division
import torch
from torch.autograd import Variable
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
SOS_token = 0
EOS_token = 1
teacher_forcing_ratio = 0.5
MAX_LENGTH = 10
def show_plot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
def indexes_from_sentence(word_table, sentence):
return [word_table.word2index[word] for word in sentence]
def variable_from_sentence(word_table, sentence):
indexes = indexes_from_sentence(word_table, sentence)
indexes.append(EOS_token)
result = Variable(torch.LongTensor(indexes).view(-1, 1))
return result
def variables_from_pair(pair, input_word_table, output_word_table):
input_variable = variable_from_sentence(input_word_table, pair[0])
target_variable = variable_from_sentence(output_word_table, pair[1])
return input_variable, target_variable
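# Shape note (added): variable_from_sentence appends EOS_token and reshapes with
# .view(-1, 1), so a 3-word sentence becomes a LongTensor Variable of shape
# (4, 1) -- one token index per row.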
| 1,083 | 26.794872 | 72 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/data_tool.py | # coding:utf-8
import json
import argparse
import random
import re
from nlp_tool import sentence_edit_distance  # this import path works when run outside PyCharm
# from source.AuxiliaryTools.nlp_tool import sentence_edit_distance
# ==== load config =====
with open('../../config.json', 'r') as con_f:
CONFIG = json.load(con_f)
def show_cluster_result(result_file):
with open(CONFIG['path']['ClusteringResult'] + result_file, 'r') as reader:
json_data = json.load(reader)
sampled_keys = random.sample(json_data.keys(), min(20, len(json_data.keys())))
while True:
print('There are %d clusters in total' % len(json_data.keys()))
for ind, key in enumerate(sampled_keys):
print("id:%d\t#%d %s" % (ind, len(json_data[key]), key))
kid = input("select a 'common slot' by id: \ninput -1 to quit \ninput -2 to re-sample\ninput:")
try:
kid = int(kid)
if kid == -1:
break
elif kid == -2:
sampled_keys = random.sample(json_data.keys(), min(20, len(json_data.keys())))
for ind, key in enumerate(sampled_keys):
print("id:%d\t#%d %s" % (ind, len(json_data[key]), key))
continue
else:
c_slot = sampled_keys[kid]
print('==== %s ====' % c_slot)
for item in json_data[c_slot]:
print(item['user_say'], '||', item['user_temp'])
print('==== %s ====' % c_slot)
except TypeError:
print("Error: select is not integer", int(kid))
def find_closest_sent(tgt_s, sent_set):
closest_s = ''
min_distance = len(tgt_s.split())
for s in sent_set:
tmp_d = sentence_edit_distance(tgt_s, s)
if tmp_d < min_distance:
closest_s = s
min_distance = tmp_d
return closest_s, min_distance
def appearance_check(test_what_file, in_what_file, top_x=1):
total = 0
not_appeared = 0
unique_set = {}
unique = 0
unique_new = 0
with open(test_what_file, 'r') as test_file, open(in_what_file, 'r') as source_file, \
open('./data_tool.log', 'w') as log_f:
target_lines = []
for line in source_file.readlines():
            target_lines.append(re.sub(r'\s<\d*>', '', line))
target_lines = set(target_lines)
test_lines = test_file.readlines()
line_count = 0
distance_sum = 0
length_sum = 0
for line in test_lines:
if line_count % 10 < top_x:
                if line not in target_lines:
                    not_appeared += 1
                    if line not in unique_set:  # count each previously unseen generated line once
                        unique_new += 1
                        tmp_closest, tmp_d = find_closest_sent(line, target_lines)
                        log_f.write("=== %d %d === \n\tgen: %s\n\tori: %s\n" % (
                            tmp_d, len(line.split()), line.replace('\n', ''), tmp_closest.replace('\n', '')
                        ))
                        distance_sum += tmp_d
                        length_sum += len(line.split())
                if line not in unique_set:
                    unique += 1
                    unique_set[line] = True
total += 1
line_count += 1
ave_d = 0 if unique_new == 0 else distance_sum / unique_new
ave_l = 0 if unique_new == 0 else length_sum / unique_new
log_str = "%s %f %s %f %s %f %s %f \n%s %f %s %f " % (
"Not Appeared", not_appeared,
"Total", total,
"Unique", unique,
"Unique New", unique_new,
"New: Average Distance", ave_d,
'New: Average Length', ave_l,
)
print(log_str)
def remove_appeared(test_what_file, in_what_file, source_file_path, result_file):
total = 0
appeared = 0
pure = []
with open(test_what_file, 'r') as test_file, \
open(in_what_file, 'r') as target_file, \
open(source_file_path, 'r') as source_file:
target_lines = target_file.readlines()
test_lines = test_file.readlines()
source_lines = source_file.readlines()
line_count = 0
for line in test_lines:
if line_count % 10 < 10:
if line not in target_lines:
pure.append(str(line_count % 10) + " " + line)
appeared += 1
total += 1
if line_count % 10 == 0:
# to show source sentence in result.
pure.append('\n==== %s\n' % source_lines[int(line_count / 10)])
line_count += 1
print("Appeared", appeared, "Total", total)
with open(result_file, 'w') as writer:
for line in pure:
writer.write(line)
if __name__ == '__main__':
tn = 'navigate_labeled'
# cm = 'intent-slot'
cm = 'slot'
parser = argparse.ArgumentParser()
parser.add_argument("--c_res", help="show clustering result of arg result file", type=str)
parser.add_argument("--appr_check", help="check utterance appearance within one file from another file", action="store_true")
parser.add_argument("--appr_remove", help="remove same utterance within one file from another file", action="store_true")
parser.add_argument("--appr_check_in", help="specific the file to check in", type=str,
default=CONFIG['path']["OnmtData"] + f"SourceData/train_{tn}_{cm}1_src.txt")
# parser.add_argument("--appr_check_in", help="specific the file to check in", type=str,
# default="/users4/ythou/Projects/TaskOrientedDialogue/data/Backup/SourceData_circle_wise/" + "train_navigate_src.txt")
parser.add_argument("--appr_check_target", help="specific the file containing utterance to check", type=str,
default=CONFIG['path']["OnmtData"] + f"Result/{tn}_{cm}1_pred.txt")
parser.add_argument("--appr_remove_res", help="specific the result file for appearance remove", type=str,
default=CONFIG['path']["OnmtData"] + f"Result/{tn}_{cm}1_pred_remove-appr.txt")
parser.add_argument("--appr_source_file", help="specific the source file for appearance remove", type=str,
default=CONFIG['path']["OnmtData"] + "SourceData/test_navigate_src.txt")
parser.add_argument("--top_x", help="specific top x in checking appr", type=int, default=10)
args = parser.parse_args()
if args.c_res:
show_cluster_result(args.c_res)
if args.appr_check and args.appr_check_target and args.appr_check_in:
appearance_check(args.appr_check_target, args.appr_check_in, top_x=args.top_x)
if args.appr_remove:
remove_appeared(args.appr_check_target, args.appr_check_in, args.appr_source_file, args.appr_remove_res)
| 6,819 | 43 | 143 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/diverse_score_demo.py | # coding:utf-8
import math
import editdistance
def sentence_edit_distance(s1, s2):
s1 = s1.split() if type(s1) is str else s1
s2 = s2.split() if type(s2) is str else s2
if type(s1) is list and type(s2) is list:
return editdistance.eval(s1, s2)
else:
print("Error: Only str and list is supported, got", type(s1), type(s2))
raise TypeError
def diverse_score(s, t):
"""
calculate pairing score
:param s: target str
:param t: candidate str
:return: score, edit distance, length penalty
"""
lst_s = s.split()
lst_t = t.split()
length_penalty = math.exp(-abs((len(lst_s) - len(lst_t))/len(lst_s)))
# length_penalty = math.exp(-abs((len(lst_s) - len(lst_t))/max(len(lst_s), len(lst_t))))
e_d = sentence_edit_distance(lst_t, lst_s)
# print(e_d * length_penalty, e_d, length_penalty, '\n', s, '\n', t)
return e_d * length_penalty
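# Worked example (added; toy strings): with s = 'a b c d' (4 tokens) and
# t = 'a b c d e f' (6 tokens), length_penalty = exp(-|4 - 6| / 4) ~= 0.6065 and
# the edit distance is 2 (insert 'e', 'f'), so diverse_score(s, t) ~= 1.21.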
if __name__ == '__main__':
s = 'tell me the flights from <fromloc.city_name> to <toloc.city_name>'
s1 = 'please show flights arriving in <toloc.city_name> from <fromloc.city_name>'
s2 = "okay i would like to fly from <fromloc.city_name> to <toloc.city_name>"
s3 = 'show me all flights from <fromloc.city_name> to <toloc.city_name> with prices'
s4 = "what are all the flights between <fromloc.city_name> and <toloc.city_name>"
print(s, diverse_score(s, s))
print(s1, diverse_score(s, s1), sentence_edit_distance(s, s1))
print(s2, diverse_score(s, s2), sentence_edit_distance(s, s2))
print(s3, diverse_score(s, s3), sentence_edit_distance(s, s3))
print(s4, diverse_score(s, s4), sentence_edit_distance(s, s4))
| 1,668 | 36.931818 | 92 | py |
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/__init__.py | 0 | 0 | 0 | py |
|
Seq2SeqDataAugmentationForLU | Seq2SeqDataAugmentationForLU-master/source/AuxiliaryTools/nlp_tool.py | import string
from nltk.tokenize import TweetTokenizer
from nltk.tokenize.treebank import TreebankWordTokenizer, TreebankWordDetokenizer
import editdistance
def sentence_edit_distance(s1, s2):
s1 = s1.split() if type(s1) is str else s1
s2 = s2.split() if type(s2) is str else s2
if type(s1) is list and type(s2) is list:
return editdistance.eval(s1, s2)
else:
print("Error: Only str and list is supported, got", type(s1), type(s2))
raise TypeError
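# Usage note (added): the distance is token-level, not character-level:
#   sentence_edit_distance('turn left now', 'turn right now')  # -> 1
#   sentence_edit_distance(['a', 'b'], ['a'])                  # -> 1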
def num_there(s):
return any(i.isdigit() for i in s)
def low_case_tokenizer(sentence, tree_bank=True):
if tree_bank:
return treebank_tokenizer(sentence)
else:
tkn = TweetTokenizer(preserve_case=False)
return tkn.tokenize(sentence)
def treebank_tokenizer(sentence):
# split 's but also split <>, wait to use in further work
t = TreebankWordTokenizer()
word_lst = t.tokenize(sentence.lower().replace("<", "LAB_").replace(">", "_RAB"))
ret = []
for w in word_lst:
ret.append(w.replace("LAB_", "<").replace("_RAB", ">"))
return ret
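# Worked example (added): slot markers survive tokenization because '<' and '>'
# are shielded as LAB_/_RAB before TreebankWordTokenizer runs:
#   treebank_tokenizer('Drive to <poi_type>.')
#   -> ['drive', 'to', '<poi_type>', '.']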
def treebank_detokenizer(tokens):
d = TreebankWordDetokenizer()
    return d.detokenize(tokens)  # detokenize is the public alias of TreebankWordDetokenizer.tokenize
def convert_to_word_lst(sentence, lower=True):
    sentence = ''.join(filter(lambda x: x in string.printable, sentence))  # join: filter() returns an iterator in Python 3
exclude = '''!"#$%&\'()*+,:;<=>?@[\\]^_`{|}~-/\t''' + '\n'
for e in exclude:
sentence = sentence.replace(e, ' ')
if lower:
sentence = sentence.lower()
word_sq = sentence.split(' ')
ret = []
for ind, w in enumerate(word_sq):
if '.' in w:
if num_there(w):
                try:  # detect whether there is a float number
                    float(w)
                    if w[len(w) - 1] == '.':
                        w = w[:-1]  # strip the sentence-final period (the old w.replace(...) discarded its result)
                    ret.append(w)
                except ValueError:
                    if w[len(w) - 1] == '.':
                        w = w[:-1]
                    ret.append(w)
else:
ret.extend(w.split('.'))
else:
ret.append(w)
    return list(filter(lambda x: x.strip(), ret))  # materialize: filter() is lazy in Python 3
if __name__ == "__main__":
print("Testing")
s = "show<O> me<O> the<O> flights<O> from<O> san<B-fromloc.city_name> diego<I-fromloc.city_name> to<O> newark<B-toloc.city_name>"
# s = "xxx's a-b_c-d (b-d) <aaa_ddd> yyyyy<sdfsd_bsdb> yyyyy:<sdfsd_bsdb>"
print(low_case_tokenizer(s))
| 2,444 | 30.753247 | 133 | py |
seq2seq | seq2seq-master/setup.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python package setup file.
"""
from setuptools import setup
setup(
name="seq2seq",
version="0.1",
install_requires=[
"numpy",
"matplotlib",
"pyyaml",
"pyrouge"
],
extras_require={'tensorflow': ['tensorflow'],
'tensorflow with gpu': ['tensorflow-gpu']},
)
| 910 | 26.606061 | 74 | py |
seq2seq | seq2seq-master/bin/__init__.py | 0 | 0 | 0 | py |
|
seq2seq | seq2seq-master/bin/infer.py | #! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generates model predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from pydoc import locate
import yaml
from six import string_types
import tensorflow as tf
from tensorflow import gfile
from seq2seq import tasks, models
from seq2seq.configurable import _maybe_load_yaml, _deep_merge_dict
from seq2seq.data import input_pipeline
from seq2seq.inference import create_inference_graph
from seq2seq.training import utils as training_utils
tf.flags.DEFINE_string("tasks", "{}", "List of inference tasks to run.")
tf.flags.DEFINE_string("model_params", "{}", """Optionally overwrite model
parameters for inference""")
tf.flags.DEFINE_string("config_path", None,
"""Path to a YAML configuration file defining FLAG
values and hyperparameters. Refer to the documentation
for more details.""")
tf.flags.DEFINE_string("input_pipeline", None,
"""Defines how input data should be loaded.
A YAML string.""")
tf.flags.DEFINE_string("model_dir", None, "directory to load model from")
tf.flags.DEFINE_string("checkpoint_path", None,
"""Full path to the checkpoint to be loaded. If None,
the latest checkpoint in the model dir is used.""")
tf.flags.DEFINE_integer("batch_size", 32, "the train/dev batch size")
FLAGS = tf.flags.FLAGS
def main(_argv):
"""Program entry point.
"""
# Load flags from config file
if FLAGS.config_path:
with gfile.GFile(FLAGS.config_path) as config_file:
config_flags = yaml.load(config_file)
for flag_key, flag_value in config_flags.items():
setattr(FLAGS, flag_key, flag_value)
if isinstance(FLAGS.tasks, string_types):
FLAGS.tasks = _maybe_load_yaml(FLAGS.tasks)
if isinstance(FLAGS.input_pipeline, string_types):
FLAGS.input_pipeline = _maybe_load_yaml(FLAGS.input_pipeline)
input_pipeline_infer = input_pipeline.make_input_pipeline_from_def(
FLAGS.input_pipeline, mode=tf.contrib.learn.ModeKeys.INFER,
shuffle=False, num_epochs=1)
# Load saved training options
train_options = training_utils.TrainOptions.load(FLAGS.model_dir)
# Create the model
model_cls = locate(train_options.model_class) or \
getattr(models, train_options.model_class)
model_params = train_options.model_params
model_params = _deep_merge_dict(
model_params, _maybe_load_yaml(FLAGS.model_params))
model = model_cls(
params=model_params,
mode=tf.contrib.learn.ModeKeys.INFER)
# Load inference tasks
hooks = []
for tdict in FLAGS.tasks:
if not "params" in tdict:
tdict["params"] = {}
task_cls = locate(tdict["class"]) or getattr(tasks, tdict["class"])
task = task_cls(tdict["params"])
hooks.append(task)
# Create the graph used for inference
predictions, _, _ = create_inference_graph(
model=model,
input_pipeline=input_pipeline_infer,
batch_size=FLAGS.batch_size)
saver = tf.train.Saver()
checkpoint_path = FLAGS.checkpoint_path
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)
def session_init_op(_scaffold, sess):
saver.restore(sess, checkpoint_path)
tf.logging.info("Restored model from %s", checkpoint_path)
scaffold = tf.train.Scaffold(init_fn=session_init_op)
session_creator = tf.train.ChiefSessionCreator(scaffold=scaffold)
with tf.train.MonitoredSession(
session_creator=session_creator,
hooks=hooks) as sess:
# Run until the inputs are exhausted
while not sess.should_stop():
sess.run([])
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 4,428 | 33.069231 | 77 | py |
seq2seq | seq2seq-master/bin/train.py | #! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main script to run training and evaluation of models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import tempfile
import yaml
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow import gfile
from seq2seq import models
from seq2seq.contrib.experiment import Experiment as PatchedExperiment
from seq2seq.configurable import _maybe_load_yaml, _create_from_dict
from seq2seq.configurable import _deep_merge_dict
from seq2seq.data import input_pipeline
from seq2seq.metrics import metric_specs
from seq2seq.training import hooks
from seq2seq.training import utils as training_utils
tf.flags.DEFINE_string("config_paths", "",
"""Path to a YAML configuration files defining FLAG
values. Multiple files can be separated by commas.
Files are merged recursively. Setting a key in these
files is equivalent to setting the FLAG value with
the same name.""")
tf.flags.DEFINE_string("hooks", "[]",
"""YAML configuration string for the
training hooks to use.""")
tf.flags.DEFINE_string("metrics", "[]",
"""YAML configuration string for the
training metrics to use.""")
tf.flags.DEFINE_string("model", "",
"""Name of the model class.
Can be either a fully-qualified name, or the name
of a class defined in `seq2seq.models`.""")
tf.flags.DEFINE_string("model_params", "{}",
"""YAML configuration string for the model
parameters.""")
tf.flags.DEFINE_string("input_pipeline_train", "{}",
"""YAML configuration string for the training
data input pipeline.""")
tf.flags.DEFINE_string("input_pipeline_dev", "{}",
"""YAML configuration string for the development
data input pipeline.""")
tf.flags.DEFINE_string("buckets", None,
"""Buckets input sequences according to these length.
A comma-separated list of sequence length buckets, e.g.
"10,20,30" would result in 4 buckets:
                       <10, 10-20, 20-30, >30. None disables bucketing.""")
tf.flags.DEFINE_integer("batch_size", 16,
"""Batch size used for training and evaluation.""")
tf.flags.DEFINE_string("output_dir", None,
"""The directory to write model checkpoints and summaries
to. If None, a local temporary directory is created.""")
# Training parameters
tf.flags.DEFINE_string("schedule", "continuous_train_and_eval",
"""Estimator function to call, defaults to
continuous_train_and_eval for local run""")
tf.flags.DEFINE_integer("train_steps", None,
"""Maximum number of training steps to run.
If None, train forever.""")
tf.flags.DEFINE_integer("eval_every_n_steps", 1000,
"Run evaluation on validation data every N steps.")
# RunConfig Flags
tf.flags.DEFINE_integer("tf_random_seed", None,
"""Random seed for TensorFlow initializers. Setting
this value allows consistency between reruns.""")
tf.flags.DEFINE_integer("save_checkpoints_secs", None,
"""Save checkpoints every this many seconds.
Can not be specified with save_checkpoints_steps.""")
tf.flags.DEFINE_integer("save_checkpoints_steps", None,
"""Save checkpoints every this many steps.
Can not be specified with save_checkpoints_secs.""")
tf.flags.DEFINE_integer("keep_checkpoint_max", 5,
"""Maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.""")
tf.flags.DEFINE_integer("keep_checkpoint_every_n_hours", 4,
"""In addition to keeping the most recent checkpoint
files, keep one checkpoint file for every N hours of
training.""")
tf.flags.DEFINE_float("gpu_memory_fraction", 1.0,
"""Fraction of GPU memory used by the process on
each GPU uniformly on the same machine.""")
tf.flags.DEFINE_boolean("gpu_allow_growth", False,
"""Allow GPU memory allocation to grow
dynamically.""")
tf.flags.DEFINE_boolean("log_device_placement", False,
"""Log the op placement to devices""")
FLAGS = tf.flags.FLAGS
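# Example invocation (a sketch; config names and paths are hypothetical):
#   python -m bin.train \
#     --config_paths "example_configs/nmt_small.yml" \
#     --model_params "{vocab_source: /tmp/vocab.src, vocab_target: /tmp/vocab.tgt}" \
#     --input_pipeline_train "{class: ParallelTextInputPipeline,
#       params: {source_files: [/tmp/train.src], target_files: [/tmp/train.tgt]}}" \
#     --input_pipeline_dev "{class: ParallelTextInputPipeline,
#       params: {source_files: [/tmp/dev.src], target_files: [/tmp/dev.tgt]}}" \
#     --output_dir /tmp/nmt_model --train_steps 10000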
def create_experiment(output_dir):
"""
Creates a new Experiment instance.
Args:
output_dir: Output directory for model checkpoints and summaries.
"""
config = run_config.RunConfig(
tf_random_seed=FLAGS.tf_random_seed,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=FLAGS.keep_checkpoint_max,
keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,
gpu_memory_fraction=FLAGS.gpu_memory_fraction)
config.tf_config.gpu_options.allow_growth = FLAGS.gpu_allow_growth
config.tf_config.log_device_placement = FLAGS.log_device_placement
train_options = training_utils.TrainOptions(
model_class=FLAGS.model,
model_params=FLAGS.model_params)
# On the main worker, save training options
if config.is_chief:
gfile.MakeDirs(output_dir)
train_options.dump(output_dir)
bucket_boundaries = None
if FLAGS.buckets:
bucket_boundaries = list(map(int, FLAGS.buckets.split(",")))
# Training data input pipeline
train_input_pipeline = input_pipeline.make_input_pipeline_from_def(
def_dict=FLAGS.input_pipeline_train,
mode=tf.contrib.learn.ModeKeys.TRAIN)
# Create training input function
train_input_fn = training_utils.create_input_fn(
pipeline=train_input_pipeline,
batch_size=FLAGS.batch_size,
bucket_boundaries=bucket_boundaries,
scope="train_input_fn")
# Development data input pipeline
dev_input_pipeline = input_pipeline.make_input_pipeline_from_def(
def_dict=FLAGS.input_pipeline_dev,
mode=tf.contrib.learn.ModeKeys.EVAL,
shuffle=False, num_epochs=1)
# Create eval input function
eval_input_fn = training_utils.create_input_fn(
pipeline=dev_input_pipeline,
batch_size=FLAGS.batch_size,
allow_smaller_final_batch=True,
scope="dev_input_fn")
def model_fn(features, labels, params, mode):
"""Builds the model graph"""
model = _create_from_dict({
"class": train_options.model_class,
"params": train_options.model_params
}, models, mode=mode)
return model(features, labels, params)
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=output_dir,
config=config,
params=FLAGS.model_params)
# Create hooks
train_hooks = []
for dict_ in FLAGS.hooks:
hook = _create_from_dict(
dict_, hooks,
model_dir=estimator.model_dir,
run_config=config)
train_hooks.append(hook)
# Create metrics
eval_metrics = {}
for dict_ in FLAGS.metrics:
metric = _create_from_dict(dict_, metric_specs)
eval_metrics[metric.name] = metric
experiment = PatchedExperiment(
estimator=estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
min_eval_frequency=FLAGS.eval_every_n_steps,
train_steps=FLAGS.train_steps,
eval_steps=None,
eval_metrics=eval_metrics,
train_monitors=train_hooks)
return experiment
def main(_argv):
"""The entrypoint for the script"""
# Parse YAML FLAGS
FLAGS.hooks = _maybe_load_yaml(FLAGS.hooks)
FLAGS.metrics = _maybe_load_yaml(FLAGS.metrics)
FLAGS.model_params = _maybe_load_yaml(FLAGS.model_params)
FLAGS.input_pipeline_train = _maybe_load_yaml(FLAGS.input_pipeline_train)
FLAGS.input_pipeline_dev = _maybe_load_yaml(FLAGS.input_pipeline_dev)
# Load flags from config file
final_config = {}
if FLAGS.config_paths:
for config_path in FLAGS.config_paths.split(","):
config_path = config_path.strip()
if not config_path:
continue
config_path = os.path.abspath(config_path)
tf.logging.info("Loading config from %s", config_path)
with gfile.GFile(config_path.strip()) as config_file:
config_flags = yaml.load(config_file)
final_config = _deep_merge_dict(final_config, config_flags)
tf.logging.info("Final Config:\n%s", yaml.dump(final_config))
# Merge flags with config values
for flag_key, flag_value in final_config.items():
if hasattr(FLAGS, flag_key) and isinstance(getattr(FLAGS, flag_key), dict):
merged_value = _deep_merge_dict(flag_value, getattr(FLAGS, flag_key))
setattr(FLAGS, flag_key, merged_value)
elif hasattr(FLAGS, flag_key):
setattr(FLAGS, flag_key, flag_value)
else:
tf.logging.warning("Ignoring config flag: %s", flag_key)
if FLAGS.save_checkpoints_secs is None \
and FLAGS.save_checkpoints_steps is None:
FLAGS.save_checkpoints_secs = 600
tf.logging.info("Setting save_checkpoints_secs to %d",
FLAGS.save_checkpoints_secs)
if not FLAGS.output_dir:
FLAGS.output_dir = tempfile.mkdtemp()
if not FLAGS.input_pipeline_train:
raise ValueError("You must specify input_pipeline_train")
if not FLAGS.input_pipeline_dev:
raise ValueError("You must specify input_pipeline_dev")
learn_runner.run(
experiment_fn=create_experiment,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 10,770 | 37.744604 | 80 | py |
seq2seq | seq2seq-master/bin/tools/generate_beam_viz.py | #! /usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generate beam search visualization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import json
import shutil
from string import Template
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
PARSER = argparse.ArgumentParser(
description="Generate beam search visualizations")
PARSER.add_argument(
"-d", "--data", type=str, required=True,
help="path to the beam search data file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-v", "--vocab", type=str, required=False,
help="path to the vocabulary file")
ARGS = PARSER.parse_args()
HTML_TEMPLATE = Template("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Beam Search</title>
<link rel="stylesheet" type="text/css" href="tree.css">
<script src="http://d3js.org/d3.v3.min.js"></script>
</head>
<body>
<script>
var treeData = $DATA
</script>
<script src="tree.js"></script>
</body>
</html>""")
def _add_graph_level(graph, level, parent_ids, names, scores):
"""Adds a levelto the passed graph"""
for i, parent_id in enumerate(parent_ids):
new_node = (level, i)
parent_node = (level - 1, parent_id)
graph.add_node(new_node)
graph.node[new_node]["name"] = names[i]
graph.node[new_node]["score"] = str(scores[i])
graph.node[new_node]["size"] = 100
# Add an edge to the parent
graph.add_edge(parent_node, new_node)
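# Illustrative sketch (added for exposition, not part of the original file):
# nodes are keyed by (level, beam_index) tuples, and every node links back to
# the parent beam it extended one level earlier.
def _example_add_graph_level():
  graph = nx.DiGraph()
  graph.add_node((0, 0))
  _add_graph_level(graph, level=1, parent_ids=[0, 0],
                   names=["the", "a"], scores=[-0.1, -0.4])
  # The root fans out to both beams: (0, 0) -> (1, 0) and (0, 0) -> (1, 1)
  assert set(graph.edges()) == {((0, 0), (1, 0)), ((0, 0), (1, 1))}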
def create_graph(predicted_ids, parent_ids, scores, vocab=None):
  """Builds the tree of beam search decisions as a networkx DiGraph."""
def get_node_name(pred):
return vocab[pred] if vocab else str(pred)
seq_length = predicted_ids.shape[0]
graph = nx.DiGraph()
for level in range(seq_length):
names = [get_node_name(pred) for pred in predicted_ids[level]]
_add_graph_level(graph, level + 1, parent_ids[level], names, scores[level])
graph.node[(0, 0)]["name"] = "START"
return graph
def main():
  """Loads the beam search data and writes one HTML visualization per entry."""
beam_data = np.load(ARGS.data)
# Optionally load vocabulary data
vocab = None
if ARGS.vocab:
with open(ARGS.vocab) as file:
vocab = file.readlines()
vocab = [_.strip() for _ in vocab]
vocab += ["UNK", "SEQUENCE_START", "SEQUENCE_END"]
if not os.path.exists(ARGS.output_dir):
os.makedirs(ARGS.output_dir)
# Copy required files
shutil.copy2("./bin/tools/beam_search_viz/tree.css", ARGS.output_dir)
shutil.copy2("./bin/tools/beam_search_viz/tree.js", ARGS.output_dir)
for idx in range(len(beam_data["predicted_ids"])):
predicted_ids = beam_data["predicted_ids"][idx]
parent_ids = beam_data["beam_parent_ids"][idx]
scores = beam_data["scores"][idx]
graph = create_graph(
predicted_ids=predicted_ids,
parent_ids=parent_ids,
scores=scores,
vocab=vocab)
json_str = json.dumps(
json_graph.tree_data(graph, (0, 0)),
ensure_ascii=False)
html_str = HTML_TEMPLATE.substitute(DATA=json_str)
output_path = os.path.join(ARGS.output_dir, "{:06d}.html".format(idx))
with open(output_path, "w") as file:
file.write(html_str)
print(output_path)
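# Example invocation (added for exposition; paths are hypothetical, and the
# .npz input is the beam data dumped during inference):
#   python -m bin.tools.generate_beam_viz \
#     -d /tmp/beams.npz -o /tmp/beam_viz -v /tmp/vocab.txt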
if __name__ == "__main__":
main() | 3,893 | 28.5 | 79 | py |