text
stringlengths 0
93.6k
|
---|
print( "\tpre-", o.precondition_neg )
|
print( "\teff+", o.effect_pos )
|
print( "\teff-", o.effect_neg )
|
print()
|
if __name__ == '__main__':
    # Parse CLI options and run every demo the user asked for, in order.
    parsed = parser.parse_args()
    for demo_number in parsed.demonumber:
        run_demo(demo_number)
|
# <FILESEP>
|
__author__ = 'max'
|
import time
|
import sys
|
import argparse
|
from lasagne_nlp.utils import utils
|
import lasagne_nlp.utils.data_processor as data_processor
|
from lasagne_nlp.utils.objectives import crf_loss, crf_accuracy
|
import lasagne
|
import theano
|
import theano.tensor as T
|
from lasagne_nlp.networks.networks import build_BiLSTM_CNN_CRF
|
import numpy as np
|
def main():
|
parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-CNN-CRF')
|
parser.add_argument('--fine_tune', action='store_true', help='Fine tune the word embeddings')
|
parser.add_argument('--embedding', choices=['word2vec', 'glove', 'senna', 'random'], help='Embedding for words',
|
required=True)
|
parser.add_argument('--embedding_dict', default=None, help='path for embedding dict')
|
parser.add_argument('--batch_size', type=int, default=10, help='Number of sentences in each batch')
|
parser.add_argument('--num_units', type=int, default=100, help='Number of hidden units in LSTM')
|
parser.add_argument('--num_filters', type=int, default=20, help='Number of filters in CNN')
|
parser.add_argument('--learning_rate', type=float, default=0.1, help='Learning rate')
|
parser.add_argument('--decay_rate', type=float, default=0.1, help='Decay rate of learning rate')
|
parser.add_argument('--grad_clipping', type=float, default=0, help='Gradient clipping')
|
parser.add_argument('--gamma', type=float, default=1e-6, help='weight for regularization')
|
parser.add_argument('--peepholes', action='store_true', help='Peepholes for LSTM')
|
parser.add_argument('--oov', choices=['random', 'embedding'], help='Embedding for oov word', required=True)
|
parser.add_argument('--update', choices=['sgd', 'momentum', 'nesterov', 'adadelta'], help='update algorithm',
|
default='sgd')
|
parser.add_argument('--regular', choices=['none', 'l2'], help='regularization for training', required=True)
|
parser.add_argument('--dropout', action='store_true', help='Apply dropout layers')
|
parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')
|
parser.add_argument('--output_prediction', action='store_true', help='Output predictions to temp files')
|
parser.add_argument('--train') # "data/POS-penn/wsj/split1/wsj1.train.original"
|
parser.add_argument('--dev') # "data/POS-penn/wsj/split1/wsj1.dev.original"
|
parser.add_argument('--test') # "data/POS-penn/wsj/split1/wsj1.test.original"
|
args = parser.parse_args()
|
def construct_input_layer():
    """Build the word-level input layer for the network.

    Two modes, selected by the closure variable ``fine_tune``:
      * fine-tuning: the input holds integer word ids of shape
        (batch, max_length) which are mapped through a trainable
        EmbeddingLayer initialised from ``embedd_table``;
      * otherwise: pre-computed embeddings of shape
        (batch, max_length, embedd_dim) are fed in directly.

    Reads ``fine_tune``, ``max_length``, ``input_var``, ``embedd_dim``,
    ``alphabet_size`` and ``embedd_table`` from the enclosing scope.
    Returns the lasagne layer producing the word representations.
    """
    if not fine_tune:
        # Embeddings are fixed and supplied directly by the data pipeline.
        return lasagne.layers.InputLayer(
            shape=(None, max_length, embedd_dim),
            input_var=input_var,
            name='input')

    # Fine-tuning: feed integer ids and look them up in a trainable table.
    id_layer = lasagne.layers.InputLayer(
        shape=(None, max_length),
        input_var=input_var,
        name='input')
    return lasagne.layers.EmbeddingLayer(
        id_layer,
        input_size=alphabet_size,
        output_size=embedd_dim,
        W=embedd_table,
        name='embedding')
|
def construct_char_input_layer():
    """Build the character-level input pipeline feeding the CNN.

    Takes integer char ids of shape (batch, max_sent_length,
    max_char_length), flattens the first two axes so every word becomes
    one row of characters, embeds each character, then dimshuffles to
    (rows, char_embedd_dim, max_char_length) so a 1-D convolution can
    slide over the character axis.

    Reads ``max_sent_length``, ``max_char_length``, ``char_input_var``,
    ``char_alphabet_size``, ``char_embedd_dim`` and ``char_embedd_table``
    from the enclosing scope. Returns the dimshuffled embedding layer.
    """
    char_ids = lasagne.layers.InputLayer(
        shape=(None, max_sent_length, max_char_length),
        input_var=char_input_var,
        name='char-input')
    # Merge (batch, sent_len) into a single leading axis; [2] keeps the
    # char axis size symbolically.
    char_ids = lasagne.layers.reshape(char_ids, (-1, [2]))
    embedded = lasagne.layers.EmbeddingLayer(
        char_ids,
        input_size=char_alphabet_size,
        output_size=char_embedd_dim,
        W=char_embedd_table,
        name='char_embedding')
    # (rows, chars, dim) -> (rows, dim, chars) for the convolution layer.
    return lasagne.layers.DimshuffleLayer(embedded, pattern=(0, 2, 1))
|
logger = utils.get_logger("BiLSTM-CNN-CRF")
|
fine_tune = args.fine_tune
|
oov = args.oov
|
regular = args.regular
|
embedding = args.embedding
|
embedding_path = args.embedding_dict
|
train_path = args.train
|
dev_path = args.dev
|
test_path = args.test
|
update_algo = args.update
|
grad_clipping = args.grad_clipping
|
peepholes = args.peepholes
|
num_filters = args.num_filters
|
gamma = args.gamma
|
output_predict = args.output_prediction
|
dropout = args.dropout
|
X_train, Y_train, mask_train, X_dev, Y_dev, mask_dev, X_test, Y_test, mask_test, \
|
embedd_table, label_alphabet, \
|
C_train, C_dev, C_test, char_embedd_table = data_processor.load_dataset_sequence_labeling(train_path, dev_path,
|
test_path, oov=oov,
|
fine_tune=fine_tune,
|
embedding=embedding,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.