    embedding_path=embedding_path,
    use_character=True)
num_labels = label_alphabet.size() - 1
logger.info("constructing network...")
# create variables
target_var = T.imatrix(name='targets')
mask_var = T.matrix(name='masks', dtype=theano.config.floatX)
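# With fine_tune, the inputs are word indices (int32 matrix) and the word
# embedding table is presumably used as a trainable lookup inside
# construct_input_layer(); otherwise the network consumes pre-computed
# embeddings directly, as a float tensor of shape [batch, max_length, embedd_dim].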
if fine_tune:
    input_var = T.imatrix(name='inputs')
    num_data, max_length = X_train.shape
    alphabet_size, embedd_dim = embedd_table.shape
else:
    input_var = T.tensor3(name='inputs', dtype=theano.config.floatX)
    num_data, max_length, embedd_dim = X_train.shape
char_input_var = T.itensor3(name='char-inputs')
num_data_char, max_sent_length, max_char_length = C_train.shape
char_alphabet_size, char_embedd_dim = char_embedd_table.shape
assert (max_length == max_sent_length)
assert (num_data == num_data_char)
# construct input and mask layers
layer_incoming1 = construct_char_input_layer()
layer_incoming2 = construct_input_layer()
layer_mask = lasagne.layers.InputLayer(shape=(None, max_length), input_var=mask_var, name='mask')
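# The mask marks real tokens (1) vs. padding (0); it is threaded through the
# network so the recurrent layers and the CRF skip padded positions.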
# construct bi-lstm-cnn-crf
num_units = args.num_units
bi_lstm_cnn_crf = build_BiLSTM_CNN_CRF(layer_incoming1, layer_incoming2, num_units, num_labels, mask=layer_mask,
                                       grad_clipping=grad_clipping, peepholes=peepholes, num_filters=num_filters,
                                       dropout=dropout)
logger.info("Network structure: hidden=%d, filter=%d" % (num_units, num_filters))
# compute loss
num_tokens = mask_var.sum(dtype=theano.config.floatX)
# get output of bi-lstm-cnn-crf; shape: [batch, length, num_labels, num_labels]
energies_train = lasagne.layers.get_output(bi_lstm_cnn_crf)
energies_eval = lasagne.layers.get_output(bi_lstm_cnn_crf, deterministic=True)
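# deterministic=True disables stochastic layers (here, dropout) at evaluation
# time, which is standard Lasagne behavior for get_output.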
loss_train = crf_loss(energies_train, target_var, mask_var).mean()
loss_eval = crf_loss(energies_eval, target_var, mask_var).mean()
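# crf_loss (a project utility, defined elsewhere) presumably returns the
# per-sentence CRF negative log-likelihood over masked positions; .mean()
# averages it over the mini-batch.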
# optional l2 regularization
if regular == 'l2':
    l2_penalty = lasagne.regularization.regularize_network_params(bi_lstm_cnn_crf, lasagne.regularization.l2)
    loss_train = loss_train + gamma * l2_penalty
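# When regular == 'l2', the objective above becomes (a sketch, with gamma the
# regularization strength):
#   loss_train = mean(CRF NLL) + gamma * sum_W ||W||^2
# since lasagne.regularization.l2 sums the squares of all regularizable
# (weight) parameters of the network.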
_, corr_train = crf_accuracy(energies_train, target_var)
corr_train = (corr_train * mask_var).sum(dtype=theano.config.floatX)
prediction_eval, corr_eval = crf_accuracy(energies_eval, target_var)
corr_eval = (corr_eval * mask_var).sum(dtype=theano.config.floatX)
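# Multiplying by mask_var before summing counts correct tags only at
# non-padding positions, so padded steps do not inflate accuracy.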
# Create update expressions for training.
# hyperparameters to tune: learning rate, momentum, regularization.
batch_size = args.batch_size
learning_rate = 1.0 if update_algo == 'adadelta' else args.learning_rate
decay_rate = args.decay_rate
momentum = 0.9
params = lasagne.layers.get_all_params(bi_lstm_cnn_crf, trainable=True)
updates = utils.create_updates(loss_train, params, update_algo, learning_rate, momentum=momentum)
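# create_updates is a project utility assumed to dispatch on update_algo to
# the corresponding lasagne.updates function (momentum is ignored by
# algorithms that do not use it); adadelta adapts per-parameter step sizes,
# hence the conventional learning_rate of 1.0 chosen above.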
# Compile a function performing a training step on a mini-batch
train_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                           [loss_train, corr_train, num_tokens],
                           updates=updates)
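# Illustrative usage (batch variable names are hypothetical):
#   err, corr, num = train_fn(word_batch, label_batch, mask_batch, char_batch)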
# Compile a second function evaluating the loss and accuracy of the network
eval_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                          [loss_eval, corr_eval, num_tokens, prediction_eval])
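# Besides the loss and token-level accuracy, eval_fn returns the decoded
# label sequences (presumably Viterbi decoding inside crf_accuracy), so the
# same function serves both dev and test evaluation.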
# Finally, launch the training loop.
logger.info(
    "Start training: %s with regularization: %s(%f), dropout: %s, fine tune: %s "
    "(#training data: %d, batch size: %d, clip: %.1f, peepholes: %s)..."
    % (update_algo, regular, (0.0 if regular == 'none' else gamma), dropout, fine_tune,
       num_data, batch_size, grad_clipping, peepholes))
num_batches = num_data / batch_size
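# Note: Python 2 integer division, so num_batches floors; any final partial
# batch is presumably handled by the mini-batch iterator later in the loop.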
num_epochs = 1000
best_loss = 1e+12
best_acc = 0.0
best_epoch_loss = 0
best_epoch_acc = 0
best_loss_test_err = 0.
best_loss_test_corr = 0.
best_acc_test_err = 0.
best_acc_test_corr = 0.
stop_count = 0
lr = learning_rate
patience = args.patience
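# Bookkeeping for model selection and early stopping: best_* record the best
# dev loss / dev accuracy seen so far and the test scores at those epochs;
# stop_count vs. patience presumably triggers stopping or decay_rate-based
# learning-rate decay later in the training loop.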
for epoch in range(1, num_epochs + 1):
    print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, decay_rate)
    train_err = 0.0
    train_corr = 0.0
    train_total = 0
    train_inst = 0
    start_time = time.time()
    num_back = 0
    train_batches = 0