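# one training epoch: iterate over shuffled mini-batches, accumulating loss
# per instance (weighted by batch size) and accuracy per token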
for batch in utils.iterate_minibatches(X_train, Y_train, masks=mask_train, char_inputs=C_train,
                                       batch_size=batch_size, shuffle=True):
    inputs, targets, masks, char_inputs = batch
    err, corr, num = train_fn(inputs, targets, masks, char_inputs)
    train_err += err * inputs.shape[0]
    train_corr += corr
    train_total += num
    train_inst += inputs.shape[0]
    train_batches += 1
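    # estimate the time left in this epoch from the running average per-batch time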
    time_ave = (time.time() - start_time) / train_batches
    time_left = (num_batches - train_batches) * time_ave

    # update log: erase the previous progress line with backspaces, then redraw it
    sys.stdout.write("\b" * num_back)
    log_info = 'train: %d/%d loss: %.4f, acc: %.2f%%, time left (estimated): %.2fs' % (
        min(train_batches * batch_size, num_data), num_data,
        train_err / train_inst, train_corr * 100 / train_total, time_left)
    sys.stdout.write(log_info)
    num_back = len(log_info)

# update training log after each epoch
assert train_inst == num_data
sys.stdout.write("\b" * num_back)
print 'train: %d/%d loss: %.4f, acc: %.2f%%, time: %.2fs' % (
    min(train_batches * batch_size, num_data), num_data,
    train_err / num_data, train_corr * 100 / train_total, time.time() - start_time)
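
# the dev set drives model selection below: test scores are recorded only at
# epochs where the dev loss or the dev accuracy improves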
# evaluate performance on dev data
dev_err = 0.0
dev_corr = 0.0
dev_total = 0
dev_inst = 0
for batch in utils.iterate_minibatches(X_dev, Y_dev, masks=mask_dev, char_inputs=C_dev, batch_size=batch_size):
    inputs, targets, masks, char_inputs = batch
    err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
    dev_err += err * inputs.shape[0]
    dev_corr += corr
    dev_total += num
    dev_inst += inputs.shape[0]
    if output_predict:
        utils.output_predictions(predictions, targets, masks, 'tmp/dev%d' % epoch, label_alphabet,
                                 is_flattened=False)
print 'dev loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
    dev_err / dev_inst, dev_corr, dev_total, dev_corr * 100 / dev_total)
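
# early-stopping bookkeeping: when neither dev loss nor dev accuracy improves,
# this epoch counts toward the patience budget; otherwise the counter resets
# and the improved metric(s) and their epochs are recorded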
if best_loss < dev_err and best_acc > dev_corr / dev_total:
    stop_count += 1
else:
    update_loss = False
    update_acc = False
    stop_count = 0
    if best_loss > dev_err:
        update_loss = True
        best_loss = dev_err
        best_epoch_loss = epoch
    if best_acc < dev_corr / dev_total:
        update_acc = True
        best_acc = dev_corr / dev_total
        best_epoch_acc = epoch

    # evaluate on test data when better performance detected
    test_err = 0.0
    test_corr = 0.0
    test_total = 0
    test_inst = 0
    for batch in utils.iterate_minibatches(X_test, Y_test, masks=mask_test, char_inputs=C_test,
                                           batch_size=batch_size):
        inputs, targets, masks, char_inputs = batch
        err, corr, num, predictions = eval_fn(inputs, targets, masks, char_inputs)
        test_err += err * inputs.shape[0]
        test_corr += corr
        test_total += num
        test_inst += inputs.shape[0]
        if output_predict:
            utils.output_predictions(predictions, targets, masks, 'tmp/test%d' % epoch, label_alphabet,
                                     is_flattened=False)
    print 'test loss: %.4f, corr: %d, total: %d, acc: %.2f%%' % (
        test_err / test_inst, test_corr, test_total, test_corr * 100 / test_total)
    if update_loss:
        best_loss_test_err = test_err
        best_loss_test_corr = test_corr
    if update_acc:
        best_acc_test_err = test_err
        best_acc_test_corr = test_corr

# stop early if dev performance has not improved for `patience` consecutive epochs
if stop_count == patience:
    break
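
# the learning rate follows inverse-time decay, lr = lr0 / (1 + epoch * decay_rate);
# adadelta is skipped, presumably because it adapts its step sizes internally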
# re-compile a function with new learning rate for training
if update_algo != 'adadelta':
    lr = learning_rate / (1.0 + epoch * decay_rate)
    updates = utils.create_updates(loss_train, params, update_algo, lr, momentum=momentum)
    train_fn = theano.function([input_var, target_var, mask_var, char_input_var],
                               [loss_train, corr_train, num_tokens],
                               updates=updates)
# print best performance on test data.