def _initialize_gru_cell(self, num_units):
  """Initializes a GRU cell.

  The Variables of the GRU cell are initialized in a way that exactly matches
  the skip-thoughts paper: recurrent weights are initialized from random
  orthonormal matrices and non-recurrent weights are initialized from random
  uniform matrices.

  Args:
    num_units: Number of output units.

  Returns:
    cell: An instance of RNNCell with variable initializers that match the
      skip-thoughts paper.
  """
  return gru_cell.LayerNormGRUCell(num_units, w_initializer=self.uniform_initializer, u_initializer=random_orthonormal_initializer, b_initializer=tf.constant_initializer(0.0))

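# `random_orthonormal_initializer` is referenced above but not defined in this
# excerpt. A minimal sketch consistent with the docstring (orthonormal
# recurrent weights drawn via SVD of a random Gaussian matrix) might be:
def random_orthonormal_initializer(shape, dtype=tf.float32, partition_info=None):
  """Variable initializer that produces a random orthonormal matrix (sketch)."""
  if len(shape) != 2 or shape[0] != shape[1]:
    raise ValueError('Expected square shape, got %s' % (shape,))
  # The left singular vectors of a random Gaussian matrix are orthonormal.
  _, u, _ = tf.svd(tf.random_normal(shape, dtype=dtype), full_matrices=True)
  return u
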
def build_encoder(self):
  """Builds the sentence encoder.

  Inputs:
    self.encode_emb
    self.encode_mask

  Outputs:
    self.thought_vectors

  Raises:
    ValueError: if config.bidirectional_encoder is True and config.encoder_dim
      is odd.
  """
  with tf.variable_scope('encoder') as scope:
    length = tf.to_int32(tf.reduce_sum(self.encode_mask, 1), name='length')
    if self.config.bidirectional_encoder:
      if self.config.encoder_dim % 2:
        raise ValueError('encoder_dim must be even when using a bidirectional encoder.')
      num_units = self.config.encoder_dim // 2
      # The forward and backward GRUs each produce half of the thought vector.
      cell_fw = self._initialize_gru_cell(num_units)
      cell_bw = self._initialize_gru_cell(num_units)
      _, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw, cell_bw=cell_bw, inputs=self.encode_emb, sequence_length=length, dtype=tf.float32, scope=scope)
      thought_vectors = tf.concat(states, 1, name='thought_vectors')
    else:
      cell = self._initialize_gru_cell(self.config.encoder_dim)
      _, state = tf.nn.dynamic_rnn(cell=cell, inputs=self.encode_emb, sequence_length=length, dtype=tf.float32, scope=scope)
      # Use an identity operation to name the Tensor in the Graph.
      thought_vectors = tf.identity(state, name='thought_vectors')
  self.thought_vectors = thought_vectors

def _build_decoder(self, name, embeddings, targets, mask, initial_state, reuse_logits):
  """Builds a sentence decoder.

  Args:
    name: Decoder name.
    embeddings: Batch of sentences to decode; a float32 Tensor with shape
      [batch_size, padded_length, emb_dim].
    targets: Batch of target word ids; an int64 Tensor with shape
      [batch_size, padded_length].
    mask: A 0/1 Tensor with shape [batch_size, padded_length].
    initial_state: Initial state of the GRU. A float32 Tensor with shape
      [batch_size, num_gru_cells].
    reuse_logits: Whether to reuse the logits weights.
  """
  # Decoder RNN.
  cell = self._initialize_gru_cell(self.config.encoder_dim)
  with tf.variable_scope(name) as scope:
    # Add a padding word at the start of each sentence (to correspond to the
    # prediction of the first word) and remove the last word.
    decoder_input = tf.pad(embeddings[:, :-1, :], [[0, 0], [1, 0], [0, 0]], name='input')
    length = tf.reduce_sum(mask, 1, name='length')
    decoder_output, _ = tf.nn.dynamic_rnn(cell=cell, inputs=decoder_input, sequence_length=length, initial_state=initial_state, scope=scope)
  # Stack batch vertically.
  decoder_output = tf.reshape(decoder_output, [-1, self.config.encoder_dim])
  targets = tf.reshape(targets, [-1])
  weights = tf.to_float(tf.reshape(mask, [-1]))
  with tf.variable_scope('logits', reuse=reuse_logits) as scope:
    logits = tf.contrib.layers.fully_connected(inputs=decoder_output, num_outputs=self.config.vocab_size, activation_fn=None, weights_initializer=self.uniform_initializer, scope=scope)
  losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits)
  batch_loss = tf.reduce_sum(losses * weights)
  tf.losses.add_loss(batch_loss)
  tf.summary.scalar('losses/' + name, batch_loss)
  self.target_cross_entropy_losses.append(losses)
  self.target_cross_entropy_loss_weights.append(weights)

def build_decoders(self):
  """Builds the sentence decoders.

  Inputs:
    self.decode_pre_emb
    self.decode_post_emb
    self.decode_pre_ids
    self.decode_post_ids
    self.decode_pre_mask
    self.decode_post_mask
    self.thought_vectors

  Outputs:
    self.target_cross_entropy_losses
    self.target_cross_entropy_loss_weights
  """
  if self.mode != 'encode':
    self._build_decoder('decoder_pre', self.decode_pre_emb, self.decode_pre_ids, self.decode_pre_mask, self.thought_vectors, False)
    self._build_decoder('decoder_post', self.decode_post_emb, self.decode_post_ids, self.decode_post_mask, self.thought_vectors, True)

def build_loss(self):
  """Builds the loss Tensor.

  Outputs:
    self.total_loss
  """
  if self.mode != 'encode':
    total_loss = tf.losses.get_total_loss()
    tf.summary.scalar('losses/total', total_loss)
    self.total_loss = total_loss

def build_global_step(self):
  """Builds the global step Tensor.

  Outputs:
    self.global_step
  """
  self.global_step = tf.contrib.framework.create_global_step()

def build(self):
  """Creates all ops for training, evaluation or encoding."""
  self.build_inputs()
  self.build_word_embeddings()
  self.build_encoder()
  self.build_decoders()
  self.build_loss()
  self.build_global_step()

def _countModelParameters(self):
  """Counts the number of parameters in the model at top level scope."""
  counter = {}
  for v in tf.global_variables():
    name = v.op.name.split('/')[0]
    num_params = v.get_shape().num_elements()
    if not num_params:
      self.fail('Could not infer num_elements from Variable %s' % v.op.name)
    counter[name] = counter.get(name, 0) + num_params
  return counter

def _checkModelParameters(self):
  """Verifies the number of parameters in the model."""
  param_counts = self._countModelParameters()
  expected_param_counts = {
      'word_embedding': 12400000,
      'encoder': 21772800,
      'decoder_pre': 21772800,
      'decoder_post': 21772800,
      'logits': 48020000,
      'global_step': 1,
  }
  self.assertDictEqual(expected_param_counts, param_counts)

def _checkOutputs(self, expected_shapes, feed_dict=None):
  """Verifies that the model produces expected outputs.

  Args:
    expected_shapes: A dict mapping Tensor or Tensor name to expected output
      shape.
    feed_dict: Values of Tensors to feed into Session.run().
  """
  fetches = list(expected_shapes.keys())  # Materialize so fetches is indexable.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    outputs = sess.run(fetches, feed_dict)
  for index, output in enumerate(outputs):
    tensor = fetches[index]
    expected = expected_shapes[tensor]
    actual = output.shape
    if expected != actual:
      self.fail('Tensor %s has shape %s (expected %s).' % (tensor, actual, expected))

def load_model(self, model_config, vocabulary_file, embedding_matrix_file, checkpoint_path):
  """Loads a skip-thoughts model.

  Args:
    model_config: Object containing parameters for building the model.
    vocabulary_file: Path to vocabulary file containing a list of newline-
      separated words where the word id is the corresponding 0-based index in
      the file.
    embedding_matrix_file: Path to a serialized numpy array of shape
      [vocab_size, embedding_dim].
    checkpoint_path: SkipThoughtsModel checkpoint file or a directory
      containing a checkpoint file.
  """
  tf.logging.info('Reading vocabulary from %s', vocabulary_file)
  with tf.gfile.GFile(vocabulary_file, mode='r') as f:
    lines = list(f.readlines())
  reverse_vocab = [line.decode('utf-8').strip() for line in lines]
  tf.logging.info('Loaded vocabulary with %d words.', len(reverse_vocab))
  tf.logging.info('Loading embedding matrix from %s', embedding_matrix_file)
  # Open in binary mode: np.load() expects raw bytes, not decoded text.
  with open(embedding_matrix_file, 'rb') as f:
    embedding_matrix = np.load(f)
  tf.logging.info('Loaded embedding matrix with shape %s', embedding_matrix.shape)
  word_embeddings = collections.OrderedDict(zip(reverse_vocab, embedding_matrix))
  g = tf.Graph()
  with g.as_default():
    encoder = skip_thoughts_encoder.SkipThoughtsEncoder(word_embeddings)
    restore_model = encoder.build_graph_from_config(model_config, checkpoint_path)
  sess = tf.Session(graph=g)
  restore_model(sess)
  self.encoders.append(encoder)
  self.sessions.append(sess)

def encode(self, data, use_norm=True, verbose=False, batch_size=128, use_eos=False):
  """Encodes a sequence of sentences as skip-thought vectors.

  Args:
    data: A list of input strings.
    use_norm: If True, normalize output skip-thought vectors to unit L2 norm.
    verbose: Whether to log every batch.
    batch_size: Batch size for the RNN encoders.
    use_eos: If True, append the end-of-sentence word to each input sentence.

  Returns:
    thought_vectors: A list of numpy arrays corresponding to 'data'.

  Raises:
    ValueError: If called before calling load_model.
  """
  if not self.encoders:
    raise ValueError('Must call load_model at least once before calling encode.')
  encoded = []
  for encoder, sess in zip(self.encoders, self.sessions):
    encoded.append(np.array(encoder.encode(sess, data, use_norm=use_norm, verbose=verbose, batch_size=batch_size, use_eos=use_eos)))
  return np.concatenate(encoded, axis=1)

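# A typical driver for the two methods above; the paths and the surrounding
# `configuration`/`encoder_manager` module names are hypothetical placeholders.
manager = encoder_manager.EncoderManager()
manager.load_model(configuration.model_config(),
                   vocabulary_file='/tmp/vocab.txt',
                   embedding_matrix_file='/tmp/embeddings.npy',
                   checkpoint_path='/tmp/model.ckpt-500008')
vectors = manager.encode(['the quick brown fox jumped over the lazy dog.'])
manager.close()
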
def close(self):
  """Closes the active TensorFlow Sessions."""
  for sess in self.sessions:
    sess.close()

def __init__(self, embeddings):
  """Initializes the encoder.

  Args:
    embeddings: Dictionary of word to embedding vector (1D numpy array).
  """
  self._sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
  self._embeddings = embeddings

def _create_restore_fn(self, checkpoint_path, saver):
  """Creates a function that restores a model from checkpoint.

  Args:
    checkpoint_path: Checkpoint file or a directory containing a checkpoint
      file.
    saver: Saver for restoring variables from the checkpoint file.

  Returns:
    restore_fn: A function such that restore_fn(sess) loads model variables
      from the checkpoint file.

  Raises:
    ValueError: If checkpoint_path does not refer to a checkpoint file or a
      directory containing a checkpoint file.
  """
  if tf.gfile.IsDirectory(checkpoint_path):
    latest_checkpoint = tf.train.latest_checkpoint(checkpoint_path)
    if not latest_checkpoint:
      raise ValueError('No checkpoint file found in: %s' % checkpoint_path)
    checkpoint_path = latest_checkpoint

  def _restore_fn(sess):
    tf.logging.info('Loading model from checkpoint: %s', checkpoint_path)
    saver.restore(sess, checkpoint_path)
    tf.logging.info('Successfully loaded checkpoint: %s', os.path.basename(checkpoint_path))

  return _restore_fn

def build_graph_from_config(self, model_config, checkpoint_path):
  """Builds the inference graph from a configuration object.

  Args:
    model_config: Object containing configuration for building the model.
    checkpoint_path: Checkpoint file or a directory containing a checkpoint
      file.

  Returns:
    restore_fn: A function such that restore_fn(sess) loads model variables
      from the checkpoint file.
  """
  tf.logging.info('Building model.')
  model = skip_thoughts_model.SkipThoughtsModel(model_config, mode='encode')
  model.build()
  saver = tf.train.Saver()
  return self._create_restore_fn(checkpoint_path, saver)

def build_graph_from_proto(self, graph_def_file, saver_def_file, checkpoint_path):
  """Builds the inference graph from serialized GraphDef and SaverDef protos.

  Args:
    graph_def_file: File containing a serialized GraphDef proto.
    saver_def_file: File containing a serialized SaverDef proto.
    checkpoint_path: Checkpoint file or a directory containing a checkpoint
      file.

  Returns:
    restore_fn: A function such that restore_fn(sess) loads model variables
      from the checkpoint file.
  """
  # Load the Graph.
  tf.logging.info('Loading GraphDef from file: %s', graph_def_file)
  graph_def = tf.GraphDef()
  with tf.gfile.FastGFile(graph_def_file, 'rb') as f:
    graph_def.ParseFromString(f.read())
  tf.import_graph_def(graph_def, name='')
  # Load the Saver.
  tf.logging.info('Loading SaverDef from file: %s', saver_def_file)
  saver_def = tf.train.SaverDef()
  with tf.gfile.FastGFile(saver_def_file, 'rb') as f:
    saver_def.ParseFromString(f.read())
  saver = tf.train.Saver(saver_def=saver_def)
  return self._create_restore_fn(checkpoint_path, saver)

def _tokenize(self, item):
  """Tokenizes an input string into a list of words."""
  tokenized = []
  for s in self._sentence_detector.tokenize(item):
    tokenized.extend(nltk.tokenize.word_tokenize(s))
  return tokenized

def _word_to_embedding(self, w):
  """Returns the embedding of a word."""
  return self._embeddings.get(w, self._embeddings[special_words.UNK])

def _preprocess(self, data, use_eos):
  """Preprocesses text for the encoder.

  Args:
    data: A list of input strings.
    use_eos: Whether to append the end-of-sentence word to each sentence.

  Returns:
    embeddings: A list of word embedding sequences corresponding to the input
      strings.
  """
  preprocessed_data = []
  for item in data:
    tokenized = self._tokenize(item)
    if use_eos:
      tokenized.append(special_words.EOS)
    preprocessed_data.append([self._word_to_embedding(w) for w in tokenized])
  return preprocessed_data

def encode(self, sess, data, use_norm=True, verbose=True, batch_size=128, use_eos=False):
  """Encodes a sequence of sentences as skip-thought vectors.

  Args:
    sess: TensorFlow Session.
    data: A list of input strings.
    use_norm: Whether to normalize skip-thought vectors to unit L2 norm.
    verbose: Whether to log every batch.
    batch_size: Batch size for the encoder.
    use_eos: Whether to append the end-of-sentence word to each input
      sentence.

  Returns:
    thought_vectors: A list of numpy arrays corresponding to the skip-thought
      encodings of sentences in 'data'.
  """
  data = self._preprocess(data, use_eos)
  thought_vectors = []
  batch_indices = np.arange(0, len(data), batch_size)
  for batch, start_index in enumerate(batch_indices):
    if verbose:
      tf.logging.info('Batch %d / %d.', batch, len(batch_indices))
    embeddings, mask = _batch_and_pad(data[start_index:start_index + batch_size])
    feed_dict = {'encode_emb:0': embeddings, 'encode_mask:0': mask}
    thought_vectors.extend(sess.run('encoder/thought_vectors:0', feed_dict=feed_dict))
  if use_norm:
    thought_vectors = [v / np.linalg.norm(v) for v in thought_vectors]
  return thought_vectors

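# `_batch_and_pad` is referenced above but not shown in this excerpt. A
# minimal sketch of the contract the feed expects (zero-padded float32
# embeddings plus a 0/1 mask) is:
def _batch_and_pad(sequences):
  """Zero-pads embedding sequences to a common length and builds a mask (sketch)."""
  batch_len = max(len(seq) for seq in sequences)
  emb_dim = sequences[0][0].shape[0]
  batch_embeddings, batch_mask = [], []
  for seq in sequences:
    embeddings = np.zeros((batch_len, emb_dim), dtype=np.float32)
    mask = np.zeros(batch_len, dtype=np.int64)
    for i, emb in enumerate(seq):
      embeddings[i] = emb
      mask[i] = 1
    batch_embeddings.append(embeddings)
    batch_mask.append(mask)
  return np.array(batch_embeddings), np.array(batch_mask)
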
def read_analogies(self):
  """Reads through the analogy question file.

  Returns:
    questions: a [n, 4] numpy array containing the analogy question's
      word ids.
    questions_skipped: questions skipped due to unknown words.
  """
  questions = []
  questions_skipped = 0
  with open(self._options.eval_data, 'rb') as analogy_f:
    for line in analogy_f:
      # The file is opened in binary mode, so compare against bytes.
      if line.startswith(b':'):  # Skip comments.
        continue
      words = line.strip().lower().split(b' ')
      ids = [self._word2id.get(w.strip()) for w in words]
      if None in ids or len(ids) != 4:
        questions_skipped += 1
      else:
        questions.append(np.array(ids))
  print('Eval analogy file: ', self._options.eval_data)
  print('Questions: ', len(questions))
  print('Skipped: ', questions_skipped)
  self._analogy_questions = np.array(questions, dtype=np.int32)

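# The eval file follows the word2vec questions-words.txt convention: comment
# lines start with ':' and each question is four space-separated words
# (A is to B as C is to D). A tiny sample file (hypothetical path):
sample = (b': capital-common-countries\n'
          b'Athens Greece Baghdad Iraq\n'
          b'Athens Greece Bangkok Thailand\n')
with open('/tmp/questions-words-sample.txt', 'wb') as f:
  f.write(sample)
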
def forward(self, examples, labels):
  """Build the graph for the forward pass."""
  opts = self._options
  # Embedding: [vocab_size, emb_dim]
  init_width = 0.5 / opts.emb_dim
  emb = tf.Variable(tf.random_uniform([opts.vocab_size, opts.emb_dim], -init_width, init_width), name='emb')
  self._emb = emb
  # Softmax weight: [vocab_size, emb_dim]. Transposed.
  sm_w_t = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name='sm_w_t')
  # Softmax bias: [vocab_size].
  sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name='sm_b')
  # Global step: scalar, i.e., shape [].
  self.global_step = tf.Variable(0, name='global_step')
  # Nodes to compute the NCE loss with candidate sampling.
  labels_matrix = tf.reshape(tf.cast(labels, dtype=tf.int64), [opts.batch_size, 1])
  # Negative sampling.
  sampled_ids, _, _ = tf.nn.fixed_unigram_candidate_sampler(true_classes=labels_matrix, num_true=1, num_sampled=opts.num_samples, unique=True, range_max=opts.vocab_size, distortion=0.75, unigrams=opts.vocab_counts.tolist())
  # Embeddings for examples: [batch_size, emb_dim]
  example_emb = tf.nn.embedding_lookup(emb, examples)
  # Weights and biases for the true labels and for the sampled ids.
  true_w = tf.nn.embedding_lookup(sm_w_t, labels)
  true_b = tf.nn.embedding_lookup(sm_b, labels)
  sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
  sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
  # True logits: [batch_size, 1]
  true_logits = tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b
  # Sampled logits: [batch_size, num_sampled]
  sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
  sampled_logits = tf.matmul(example_emb, sampled_w, transpose_b=True) + sampled_b_vec
  return true_logits, sampled_logits

def nce_loss(self, true_logits, sampled_logits):
  """Build the graph for the NCE loss."""
  opts = self._options
  true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(true_logits), logits=true_logits)
  sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
  # NCE loss is the sum of the true and noise (sampled words) contributions,
  # averaged over the batch.
  nce_loss_tensor = (tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent)) / opts.batch_size
  return nce_loss_tensor

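# In equation form, with B = opts.batch_size, K = opts.num_samples, logit
# s(w, c) = u_w . v_c + b_c, and sigmoid(x) = 1 / (1 + exp(-x)), the loss is
#
#   L = (1 / B) * sum over (w, c) in batch of
#       [ -log sigmoid(s(w, c)) - sum_{k=1..K} log sigmoid(-s(w, n_k)) ]
#
# since sigmoid cross-entropy with label 1 is -log sigmoid(x) and with
# label 0 it is -log sigmoid(-x).
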
def optimize(self, loss):
  """Build the graph to optimize the loss function."""
  opts = self._options
  words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
  # Linearly decay the learning rate toward a floor of 0.01% of its start.
  lr = opts.learning_rate * tf.maximum(0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
  self._lr = lr
  optimizer = tf.train.GradientDescentOptimizer(lr)
  train = optimizer.minimize(loss, global_step=self.global_step, gate_gradients=optimizer.GATE_NONE)
  self._train = train

def build_eval_graph(self):
  """Build the eval graph."""
  # Each analogy task is to predict the 4th word (d) given three words:
  # a, b, c. E.g., a=italy, b=rome, c=france, we should predict d=paris.
  # The eval feeds three vectors of word ids for a, b, c, each of size N,
  # where N is the number of analogies we evaluate in one batch.
  analogy_a = tf.placeholder(dtype=tf.int32)
  analogy_b = tf.placeholder(dtype=tf.int32)
  analogy_c = tf.placeholder(dtype=tf.int32)
  # Normalized word embeddings of shape [vocab_size, emb_dim].
  nemb = tf.nn.l2_normalize(self._emb, 1)
  # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
  a_emb = tf.gather(nemb, analogy_a)
  b_emb = tf.gather(nemb, analogy_b)
  c_emb = tf.gather(nemb, analogy_c)
  # We expect d's embedding on the unit hypersphere near c + (b - a).
  target = c_emb + (b_emb - a_emb)
  # Compute cosine distance between each pair of target and vocab word.
  dist = tf.matmul(target, nemb, transpose_b=True)
  # For each question (row in dist), find the top 4 words.
  _, pred_idx = tf.nn.top_k(dist, 4)
  # Nodes for computing neighbors of a given word by cosine distance.
  nearby_word = tf.placeholder(dtype=tf.int32)
  nearby_emb = tf.gather(nemb, nearby_word)
  nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
  nearby_val, nearby_idx = tf.nn.top_k(nearby_dist, min(1000, self._options.vocab_size))
  # Nodes used by training and evaluation to run/feed/fetch.
  self._analogy_a = analogy_a
  self._analogy_b = analogy_b
  self._analogy_c = analogy_c
  self._analogy_pred_idx = pred_idx
  self._nearby_word = nearby_word
  self._nearby_val = nearby_val
  self._nearby_idx = nearby_idx

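# Numerically, the eval graph is vector arithmetic over L2-normalized rows;
# a numpy sketch of the same prediction rule:
def predict_analogy_np(emb, a, b, c, k=4):
  """Top-k candidate ids for a:b :: c:? over an embedding matrix (sketch)."""
  nemb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
  target = nemb[c] + (nemb[b] - nemb[a])
  dist = nemb.dot(target)  # Cosine similarity of every word to the target.
  return np.argsort(-dist)[:k]
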
def build_graph(self):
  """Build the graph for the full model."""
  opts = self._options
  (words, counts, words_per_epoch, self._epoch, self._words, examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data, batch_size=opts.batch_size, window_size=opts.window_size, min_count=opts.min_count, subsample=opts.subsample)
  (opts.vocab_words, opts.vocab_counts, opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
  opts.vocab_size = len(opts.vocab_words)
  print('Data file: ', opts.train_data)
  print('Vocab size: ', opts.vocab_size - 1, ' + UNK')
  print('Words per epoch: ', opts.words_per_epoch)
  self._examples = examples
  self._labels = labels
  self._id2word = opts.vocab_words
  for i, w in enumerate(self._id2word):
    self._word2id[w] = i
  true_logits, sampled_logits = self.forward(examples, labels)
  loss = self.nce_loss(true_logits, sampled_logits)
  tf.summary.scalar('NCE loss', loss)
  self._loss = loss
  self.optimize(loss)
  tf.global_variables_initializer().run()
  self.saver = tf.train.Saver()

def save_vocab(self):
  """Save the vocabulary to a file so the model can be reloaded."""
  opts = self._options
  with open(os.path.join(opts.save_path, 'vocab.txt'), 'w') as f:
    for i in xrange(opts.vocab_size):
      vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode('utf-8')
      f.write('%s %d\n' % (vocab_word, opts.vocab_counts[i]))

def train(self):
  """Train the model."""
  opts = self._options
  initial_epoch, initial_words = self._session.run([self._epoch, self._words])
  summary_op = tf.summary.merge_all()
  summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
  workers = []
  for _ in xrange(opts.concurrent_steps):
    t = threading.Thread(target=self._train_thread_body)
    t.start()
    workers.append(t)
  last_words, last_time, last_summary_time = initial_words, time.time(), 0
  last_checkpoint_time = 0
  while True:
    time.sleep(opts.statistics_interval)
    (epoch, step, loss, words, lr) = self._session.run([self._epoch, self.global_step, self._loss, self._words, self._lr])
    now = time.time()
    last_words, last_time, rate = words, now, (words - last_words) / (now - last_time)
    print('Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r' % (epoch, step, lr, loss, rate), end='')
    sys.stdout.flush()
    if now - last_summary_time > opts.summary_interval:
      summary_str = self._session.run(summary_op)
      summary_writer.add_summary(summary_str, step)
      last_summary_time = now
    if now - last_checkpoint_time > opts.checkpoint_interval:
      self.saver.save(self._session, os.path.join(opts.save_path, 'model.ckpt'), global_step=step.astype(int))
      last_checkpoint_time = now
    if epoch != initial_epoch:
      break
  for t in workers:
    t.join()
  return epoch

def _predict(self, analogy):
  """Predict the top 4 answers for analogy questions."""
  idx, = self._session.run([self._analogy_pred_idx], {self._analogy_a: analogy[:, 0], self._analogy_b: analogy[:, 1], self._analogy_c: analogy[:, 2]})
  return idx

def eval(self):
  """Evaluate analogy questions and report accuracy."""
  # How many questions we get right at precision@1.
  correct = 0
  try:
    total = self._analogy_questions.shape[0]
  except AttributeError:
    raise AttributeError('Need to read analogy questions.')
  start = 0
  while start < total:
    limit = start + 2500
    sub = self._analogy_questions[start:limit, :]
    idx = self._predict(sub)
    start = limit
    for question in xrange(sub.shape[0]):
      for j in xrange(4):
        if idx[question, j] == sub[question, 3]:
          # We predicted correctly, e.g., [italy, rome, france, paris].
          correct += 1
          break
        elif idx[question, j] in sub[question, :3]:
          # Skip words that are already in the question.
          continue
        else:
          # The correct label is not the precision@1.
          break
  print()
  print('Eval %4d/%d accuracy = %4.1f%%' % (correct, total, correct * 100.0 / total))

def analogy(self, w0, w1, w2):
  """Predict word w3 as in w0:w1 vs w2:w3."""
  wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
  idx = self._predict(wid)
  for c in [self._id2word[i] for i in idx[0, :]]:
    if c not in [w0, w1, w2]:
      print(c)
      return  # Stop once an answer not already in the question is printed.
  print('unknown')

def nearby(self, words, num=20):
  """Prints out nearby words given a list of words."""
  ids = np.array([self._word2id.get(x, 0) for x in words])
  vals, idx = self._session.run([self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
  for i in xrange(len(words)):
    print('\n%s\n=====================================' % words[i])
    for neighbor, distance in zip(idx[i, :num], vals[i, :num]):
      print('%-20s %6.4f' % (self._id2word[neighbor], distance))

def read_analogies(self):
  """Reads through the analogy question file.

  Returns:
    questions: a [n, 4] numpy array containing the analogy question's
      word ids.
    questions_skipped: questions skipped due to unknown words.
  """
  questions = []
  questions_skipped = 0
  with open(self._options.eval_data, 'rb') as analogy_f:
    for line in analogy_f:
      # The file is opened in binary mode, so compare against bytes.
      if line.startswith(b':'):  # Skip comments.
        continue
      words = line.strip().lower().split(b' ')
      ids = [self._word2id.get(w.strip()) for w in words]
      if None in ids or len(ids) != 4:
        questions_skipped += 1
      else:
        questions.append(np.array(ids))
  print('Eval analogy file: ', self._options.eval_data)
  print('Questions: ', len(questions))
  print('Skipped: ', questions_skipped)
  self._analogy_questions = np.array(questions, dtype=np.int32)

def build_graph(self):
  """Build the model graph."""
  opts = self._options
  # The training data. A text file.
  (words, counts, words_per_epoch, current_epoch, total_words_processed, examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data, batch_size=opts.batch_size, window_size=opts.window_size, min_count=opts.min_count, subsample=opts.subsample)
  (opts.vocab_words, opts.vocab_counts, opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
  opts.vocab_size = len(opts.vocab_words)
  print('Data file: ', opts.train_data)
  print('Vocab size: ', opts.vocab_size - 1, ' + UNK')
  print('Words per epoch: ', opts.words_per_epoch)
  self._id2word = opts.vocab_words
  for i, w in enumerate(self._id2word):
    self._word2id[w] = i
  # Input words embedding: [vocab_size, emb_dim]
  w_in = tf.Variable(tf.random_uniform([opts.vocab_size, opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim), name='w_in')
  # Output words embedding (softmax weights): [vocab_size, emb_dim]
  w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name='w_out')
  # Global step: scalar, i.e., shape [].
  global_step = tf.Variable(0, name='global_step')
  # Linear learning rate decay.
  words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
  lr = opts.learning_rate * tf.maximum(0.0001, 1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
  # Training node.
  inc = global_step.assign_add(1)
  with tf.control_dependencies([inc]):
    train = word2vec.neg_train_word2vec(w_in, w_out, examples, labels, lr, vocab_count=opts.vocab_counts.tolist(), num_negative_samples=opts.num_samples)
  self._w_in = w_in
  self._examples = examples
  self._labels = labels
  self._lr = lr
  self._train = train
  self.global_step = global_step
  self._epoch = current_epoch
  self._words = total_words_processed

def save_vocab(self):
  """Save the vocabulary to a file so the model can be reloaded."""
  opts = self._options
  with open(os.path.join(opts.save_path, 'vocab.txt'), 'w') as f:
    for i in xrange(opts.vocab_size):
      vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode('utf-8')
      f.write('%s %d\n' % (vocab_word, opts.vocab_counts[i]))

def build_eval_graph(self):
  """Build the evaluation graph."""
  opts = self._options
  # Analogy questions: predict word d given words a, b, c.
  analogy_a = tf.placeholder(dtype=tf.int32)
  analogy_b = tf.placeholder(dtype=tf.int32)
  analogy_c = tf.placeholder(dtype=tf.int32)
  nemb = tf.nn.l2_normalize(self._w_in, 1)
  a_emb = tf.gather(nemb, analogy_a)
  b_emb = tf.gather(nemb, analogy_b)
  c_emb = tf.gather(nemb, analogy_c)
  target = c_emb + (b_emb - a_emb)
  dist = tf.matmul(target, nemb, transpose_b=True)
  _, pred_idx = tf.nn.top_k(dist, 4)
  nearby_word = tf.placeholder(dtype=tf.int32)
  nearby_emb = tf.gather(nemb, nearby_word)
  nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
  nearby_val, nearby_idx = tf.nn.top_k(nearby_dist, min(1000, opts.vocab_size))
  self._analogy_a = analogy_a
  self._analogy_b = analogy_b
  self._analogy_c = analogy_c
  self._analogy_pred_idx = pred_idx
  self._nearby_word = nearby_word
  self._nearby_val = nearby_val
  self._nearby_idx = nearby_idx
  # Properly initialize all variables.
  tf.global_variables_initializer().run()
  self.saver = tf.train.Saver()

def train(self):
  """Train the model."""
  opts = self._options
  initial_epoch, initial_words = self._session.run([self._epoch, self._words])
  workers = []
  for _ in xrange(opts.concurrent_steps):
    t = threading.Thread(target=self._train_thread_body)
    t.start()
    workers.append(t)
  last_words, last_time = initial_words, time.time()
  while True:
    time.sleep(5)
    epoch, step, words, lr = self._session.run([self._epoch, self.global_step, self._words, self._lr])
    now = time.time()
    last_words, last_time, rate = words, now, (words - last_words) / (now - last_time)
    print('Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r' % (epoch, step, lr, rate), end='')
    sys.stdout.flush()
    if epoch != initial_epoch:
      break
  for t in workers:
    t.join()

def _predict(self, analogy):
  """Predict the top 4 answers for analogy questions."""
  idx, = self._session.run([self._analogy_pred_idx], {self._analogy_a: analogy[:, 0], self._analogy_b: analogy[:, 1], self._analogy_c: analogy[:, 2]})
  return idx

def eval(self):
  """Evaluate analogy questions and report accuracy."""
  correct = 0
  try:
    total = self._analogy_questions.shape[0]
  except AttributeError:
    raise AttributeError('Need to read analogy questions.')
  start = 0
  while start < total:
    limit = start + 2500
    sub = self._analogy_questions[start:limit, :]
    idx = self._predict(sub)
    start = limit
    for question in xrange(sub.shape[0]):
      for j in xrange(4):
        if idx[question, j] == sub[question, 3]:
          correct += 1
          break
        elif idx[question, j] in sub[question, :3]:
          # Skip words that are already in the question.
          continue
        else:
          break
  print()
  print('Eval %4d/%d accuracy = %4.1f%%' % (correct, total, correct * 100.0 / total))

def analogy(self, w0, w1, w2):
  """Predict word w3 as in w0:w1 vs w2:w3."""
  wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
  idx = self._predict(wid)
  for c in [self._id2word[i] for i in idx[0, :]]:
    if c not in [w0, w1, w2]:
      print(c)
      return  # Stop once an answer not already in the question is printed.
  print('unknown')

def nearby(self, words, num=20):
  """Prints out nearby words given a list of words."""
  ids = np.array([self._word2id.get(x, 0) for x in words])
  vals, idx = self._session.run([self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
  for i in xrange(len(words)):
    print('\n%s\n=====================================' % words[i])
    for neighbor, distance in zip(idx[i, :num], vals[i, :num]):
      print('%-20s %6.4f' % (self._id2word[neighbor], distance))

def __init__(self, source_vocab_size, target_vocab_size, buckets, size, num_layers, max_gradient_norm, batch_size, learning_rate, learning_rate_decay_factor, use_lstm=False, num_samples=512, forward_only=False, dtype=tf.float32):
  """Create the model.

  Args:
    source_vocab_size: size of the source vocabulary.
    target_vocab_size: size of the target vocabulary.
    buckets: a list of pairs (I, O), where I specifies maximum input length
      that will be processed in that bucket, and O specifies maximum output
      length. Training instances that have inputs longer than I or outputs
      longer than O will be pushed to the next bucket and padded accordingly.
      We assume that the list is sorted, e.g., [(2, 4), (8, 16)].
    size: number of units in each layer of the model.
    num_layers: number of layers in the model.
    max_gradient_norm: gradients will be clipped to maximally this norm.
    batch_size: the size of the batches used during training;
      the model construction is independent of batch_size, so it can be
      changed after initialization if this is convenient, e.g., for decoding.
    learning_rate: learning rate to start with.
    learning_rate_decay_factor: decay learning rate by this much when needed.
    use_lstm: if true, we use LSTM cells instead of GRU cells.
    num_samples: number of samples for sampled softmax.
    forward_only: if set, we do not construct the backward pass in the model.
    dtype: the data type to use to store internal variables.
  """
  self.source_vocab_size = source_vocab_size
  self.target_vocab_size = target_vocab_size
  self.buckets = buckets
  self.batch_size = batch_size
  self.learning_rate = tf.Variable(float(learning_rate), trainable=False, dtype=dtype)
  self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)
  self.global_step = tf.Variable(0, trainable=False)

  # If we use sampled softmax, we need an output projection.
  output_projection = None
  softmax_loss_function = None
  # Sampled softmax only makes sense if we sample less than vocabulary size.
  if num_samples > 0 and num_samples < self.target_vocab_size:
    w_t = tf.get_variable('proj_w', [self.target_vocab_size, size], dtype=dtype)
    w = tf.transpose(w_t)
    b = tf.get_variable('proj_b', [self.target_vocab_size], dtype=dtype)
    output_projection = (w, b)

    def sampled_loss(labels, logits):
      labels = tf.reshape(labels, [-1, 1])
      # Compute the sampled_softmax_loss using 32bit floats to avoid
      # numerical instabilities.
      local_w_t = tf.cast(w_t, tf.float32)
      local_b = tf.cast(b, tf.float32)
      local_inputs = tf.cast(logits, tf.float32)
      return tf.cast(tf.nn.sampled_softmax_loss(weights=local_w_t, biases=local_b, labels=labels, inputs=local_inputs, num_sampled=num_samples, num_classes=self.target_vocab_size), dtype)

    softmax_loss_function = sampled_loss

  # Create the internal (possibly multi-layer) cell for our RNN.
  def single_cell():
    return tf.contrib.rnn.GRUCell(size)
  if use_lstm:
    def single_cell():
      return tf.contrib.rnn.BasicLSTMCell(size)
  cell = single_cell()
  if num_layers > 1:
    cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])

  # The seq2seq function: we use embedding for the input and attention.
  def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
    return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell, num_encoder_symbols=source_vocab_size, num_decoder_symbols=target_vocab_size, embedding_size=size, output_projection=output_projection, feed_previous=do_decode, dtype=dtype)

  # Feeds for inputs.
  self.encoder_inputs = []
  self.decoder_inputs = []
  self.target_weights = []
  for i in xrange(buckets[-1][0]):  # Last bucket is the biggest one.
    self.encoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='encoder{0}'.format(i)))
  for i in xrange(buckets[-1][1] + 1):
    self.decoder_inputs.append(tf.placeholder(tf.int32, shape=[None], name='decoder{0}'.format(i)))
    self.target_weights.append(tf.placeholder(dtype, shape=[None], name='weight{0}'.format(i)))

  # Our targets are decoder inputs shifted by one.
  targets = [self.decoder_inputs[i + 1] for i in xrange(len(self.decoder_inputs) - 1)]

  # Training outputs and losses.
  if forward_only:
    self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), softmax_loss_function=softmax_loss_function)
    # If we use output projection, we need to project outputs for decoding.
    if output_projection is not None:
      for b in xrange(len(buckets)):
        self.outputs[b] = [tf.matmul(output, output_projection[0]) + output_projection[1] for output in self.outputs[b]]
  else:
    self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(self.encoder_inputs, self.decoder_inputs, targets, self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, False), softmax_loss_function=softmax_loss_function)

  # Gradients and SGD update operation for training the model.
  params = tf.trainable_variables()
  if not forward_only:
    self.gradient_norms = []
    self.updates = []
    opt = tf.train.GradientDescentOptimizer(self.learning_rate)
    for b in xrange(len(buckets)):
      gradients = tf.gradients(self.losses[b], params)
      clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
      self.gradient_norms.append(norm)
      self.updates.append(opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step))

  self.saver = tf.train.Saver(tf.global_variables())

def step(self, session, encoder_inputs, decoder_inputs, target_weights, bucket_id, forward_only):
  """Run a step of the model feeding the given inputs.

  Args:
    session: tensorflow session to use.
    encoder_inputs: list of numpy int vectors to feed as encoder inputs.
    decoder_inputs: list of numpy int vectors to feed as decoder inputs.
    target_weights: list of numpy float vectors to feed as target weights.
    bucket_id: which bucket of the model to use.
    forward_only: whether to do the backward step or only forward.

  Returns:
    A triple consisting of gradient norm (or None if we did not do backward),
    average perplexity, and the outputs.

  Raises:
    ValueError: if length of encoder_inputs, decoder_inputs, or
      target_weights disagrees with bucket size for the specified bucket_id.
  """
  # Check if the sizes match.
  encoder_size, decoder_size = self.buckets[bucket_id]
  if len(encoder_inputs) != encoder_size:
    raise ValueError('Encoder length must be equal to the one in bucket, %d != %d.' % (len(encoder_inputs), encoder_size))
  if len(decoder_inputs) != decoder_size:
    raise ValueError('Decoder length must be equal to the one in bucket, %d != %d.' % (len(decoder_inputs), decoder_size))
  if len(target_weights) != decoder_size:
    raise ValueError('Weights length must be equal to the one in bucket, %d != %d.' % (len(target_weights), decoder_size))
  # Input feed: encoder inputs, decoder inputs, target_weights, as provided.
  input_feed = {}
  for l in xrange(encoder_size):
    input_feed[self.encoder_inputs[l].name] = encoder_inputs[l]
  for l in xrange(decoder_size):
    input_feed[self.decoder_inputs[l].name] = decoder_inputs[l]
    input_feed[self.target_weights[l].name] = target_weights[l]
  # Since our targets are decoder inputs shifted by one, we need one more.
  last_target = self.decoder_inputs[decoder_size].name
  input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)
  # Output feed: depends on whether we do a backward step or not.
  if not forward_only:
    output_feed = [self.updates[bucket_id], self.gradient_norms[bucket_id], self.losses[bucket_id]]
  else:
    output_feed = [self.losses[bucket_id]]
    for l in xrange(decoder_size):
      output_feed.append(self.outputs[bucket_id][l])
  outputs = session.run(output_feed, input_feed)
  if not forward_only:
    return outputs[1], outputs[2], None  # Gradient norm, loss, no outputs.
  else:
    return None, outputs[0], outputs[1:]  # No gradient norm, loss, outputs.

def get_batch(self, data, bucket_id):
  """Get a random batch of data from the specified bucket, prepare for step.

  To feed data in step(..) it must be a list of batch-major vectors, while
  data here contains single length-major cases. So the main logic of this
  function is to re-index data cases to be in the proper format for feeding.

  Args:
    data: a tuple of size len(self.buckets) in which each element contains
      lists of pairs of input and output data that we use to create a batch.
    bucket_id: integer, which bucket to get the batch for.

  Returns:
    The triple (encoder_inputs, decoder_inputs, target_weights) for
    the constructed batch that has the proper format to call step(...) later.
  """
  encoder_size, decoder_size = self.buckets[bucket_id]
  encoder_inputs, decoder_inputs = [], []
  # Get a random batch of encoder and decoder inputs from data, pad them if
  # needed, reverse encoder inputs and add GO to decoder.
  for _ in xrange(self.batch_size):
    encoder_input, decoder_input = random.choice(data[bucket_id])
    # Encoder inputs are padded and then reversed.
    encoder_pad = [data_utils.PAD_ID] * (encoder_size - len(encoder_input))
    encoder_inputs.append(list(reversed(encoder_input + encoder_pad)))
    # Decoder inputs get an extra "GO" symbol, and are padded then.
    decoder_pad_size = decoder_size - len(decoder_input) - 1
    decoder_inputs.append([data_utils.GO_ID] + decoder_input + [data_utils.PAD_ID] * decoder_pad_size)
  # Now we create batch-major vectors from the data selected above.
  batch_encoder_inputs, batch_decoder_inputs, batch_weights = [], [], []
  # Batch encoder inputs are just re-indexed encoder_inputs.
  for length_idx in xrange(encoder_size):
    batch_encoder_inputs.append(np.array([encoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32))
  # Batch decoder inputs are re-indexed decoder_inputs; we create weights.
  for length_idx in xrange(decoder_size):
    batch_decoder_inputs.append(np.array([decoder_inputs[batch_idx][length_idx] for batch_idx in xrange(self.batch_size)], dtype=np.int32))
    # Create target_weights to be 0 for targets that are padding. The
    # corresponding target is the decoder input shifted by 1 forward.
    batch_weight = np.ones(self.batch_size, dtype=np.float32)
    for batch_idx in xrange(self.batch_size):
      if length_idx < decoder_size - 1:
        target = decoder_inputs[batch_idx][length_idx + 1]
      if length_idx == decoder_size - 1 or target == data_utils.PAD_ID:
        batch_weight[batch_idx] = 0.0
    batch_weights.append(batch_weight)
  return batch_encoder_inputs, batch_decoder_inputs, batch_weights

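# Worked example, assuming the conventional ids data_utils.PAD_ID == 0 and
# data_utils.GO_ID == 1, for a bucket of size (4, 4) and the pair ([5, 7], [8]):
encoder_size, decoder_size = 4, 4
encoder_input, decoder_input = [5, 7], [8]
encoder = list(reversed(encoder_input + [0] * (encoder_size - len(encoder_input))))
decoder = [1] + decoder_input + [0] * (decoder_size - len(decoder_input) - 1)
print(encoder)  # [0, 0, 7, 5]: padded, then reversed.
print(decoder)  # [1, 8, 0, 0]: GO, target word, then padding.
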
def forward_pass(self, x, input_data_format='channels_last'):
  """Build the core model within the graph."""
  if self._data_format != input_data_format:
    if input_data_format == 'channels_last':
      # Computation requires channels_first.
      x = tf.transpose(x, [0, 3, 1, 2])
    else:
      # Computation requires channels_last.
      x = tf.transpose(x, [0, 2, 3, 1])
  # Image standardization: map uint8 pixel values into roughly [-1, 1).
  x = x / 128 - 1
  x = self._conv(x, 3, 16, 1)
  x = self._batch_norm(x)
  x = self._relu(x)
  res_func = self._residual_v1
  # 3 stages of block stacking.
  for i in range(3):
    with tf.name_scope('stage'):
      for j in range(self.n):
        if j == 0:
          # First block in a stage; filters and strides may change.
          x = res_func(x, 3, self.filters[i], self.filters[i + 1], self.strides[i])
        else:
          # Following blocks in a stage; constant filters and unit stride.
          x = res_func(x, 3, self.filters[i + 1], self.filters[i + 1], 1)
  x = self._global_avg_pool(x)
  x = self._fully_connected(x, self.num_classes)
  return x

def parser(self, serialized_example):
  """Parses a single tf.Example into image and label tensors."""
  features = tf.parse_single_example(serialized_example, features={'image': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)})
  image = tf.decode_raw(features['image'], tf.uint8)
  image.set_shape([DEPTH * HEIGHT * WIDTH])
  # Reshape from [depth * height * width] to [depth, height, width], then
  # transpose to [height, width, depth].
  image = tf.cast(tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]), tf.float32)
  label = tf.cast(features['label'], tf.int32)
  # Custom preprocessing.
  image = self.preprocess(image)
  return image, label

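# A record compatible with the parser above can be produced with the standard
# tf.train.Example API (output path is a hypothetical placeholder):
raw = np.zeros([DEPTH, HEIGHT, WIDTH], dtype=np.uint8)  # Depth-major raw image.
example = tf.train.Example(features=tf.train.Features(feature={
    'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[raw.tobytes()])),
    'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
}))
with tf.python_io.TFRecordWriter('/tmp/sample.tfrecords') as writer:
  writer.write(example.SerializeToString())
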
def make_batch(self, batch_size):
  """Read the images and labels from 'filenames'."""
  filenames = self.get_filenames()
  # Repeat infinitely.
  dataset = tf.contrib.data.TFRecordDataset(filenames).repeat()
  # Parse records.
  dataset = dataset.map(self.parser, num_threads=batch_size, output_buffer_size=2 * batch_size)
  # Potentially shuffle records.
  if self.subset == 'train':
    min_queue_examples = int(Cifar10DataSet.num_examples_per_epoch(self.subset) * 0.4)
    # Ensure the capacity is sufficiently large for good random shuffling.
    dataset = dataset.shuffle(buffer_size=min_queue_examples + 3 * batch_size)
  # Batch it up.
  dataset = dataset.batch(batch_size)
  iterator = dataset.make_one_shot_iterator()
  image_batch, label_batch = iterator.get_next()
  return image_batch, label_batch

def preprocess(self, image):
  """Preprocess a single image in [height, width, depth] layout."""
  if self.subset == 'train' and self.use_distortion:
    # Pad 4 pixels on each side, then randomly crop back to [HEIGHT, WIDTH].
    image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)
    image = tf.random_crop(image, [HEIGHT, WIDTH, DEPTH])
    image = tf.image.random_flip_left_right(image)
  return image

def __init__(self, is_training, data_format):
  """ResNet constructor.

  Args:
    is_training: if build training or inference model.
    data_format: the data_format used during computation.
      one of 'channels_first' or 'channels_last'.
  """
  self._is_training = is_training
  assert data_format in ('channels_first', 'channels_last')
  self._data_format = data_format

def _residual_v1(self, x, kernel_size, in_filter, out_filter, stride, activate_before_residual=False):
  """Residual unit with 2 sub layers, using Plan A for shortcut connection."""
  del activate_before_residual  # Unused in this residual variant.
  with tf.name_scope('residual_v1') as name_scope:
    orig_x = x
    x = self._conv(x, kernel_size, out_filter, stride)
    x = self._batch_norm(x)
    x = self._relu(x)
    x = self._conv(x, kernel_size, out_filter, 1)
    x = self._batch_norm(x)
    if in_filter != out_filter:
      orig_x = self._avg_pool(orig_x, stride, stride)
      pad = (out_filter - in_filter) // 2
      if self._data_format == 'channels_first':
        orig_x = tf.pad(orig_x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
      else:
        orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad, pad]])
    x = self._relu(tf.add(x, orig_x))
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x

def _residual_v2(self, x, in_filter, out_filter, stride, activate_before_residual=False):
  """Residual unit with 2 sub layers with preactivation, plan A shortcut."""
  with tf.name_scope('residual_v2') as name_scope:
    if activate_before_residual:
      x = self._batch_norm(x)
      x = self._relu(x)
      orig_x = x
    else:
      orig_x = x
      x = self._batch_norm(x)
      x = self._relu(x)
    x = self._conv(x, 3, out_filter, stride)
    x = self._batch_norm(x)
    x = self._relu(x)
    x = self._conv(x, 3, out_filter, [1, 1, 1, 1])
    if in_filter != out_filter:
      pad = (out_filter - in_filter) // 2
      orig_x = self._avg_pool(orig_x, stride, stride)
      if self._data_format == 'channels_first':
        orig_x = tf.pad(orig_x, [[0, 0], [pad, pad], [0, 0], [0, 0]])
      else:
        orig_x = tf.pad(orig_x, [[0, 0], [0, 0], [0, 0], [pad, pad]])
    x = tf.add(x, orig_x)
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x

def _bottleneck_residual_v2(self, x, in_filter, out_filter, stride, activate_before_residual=False):
  """Bottleneck residual unit with 3 sub layers, plan B shortcut."""
  with tf.name_scope('bottle_residual_v2') as name_scope:
    if activate_before_residual:
      x = self._batch_norm(x)
      x = self._relu(x)
      orig_x = x
    else:
      orig_x = x
      x = self._batch_norm(x)
      x = self._relu(x)
    x = self._conv(x, 1, out_filter // 4, stride, is_atrous=True)
    x = self._batch_norm(x)
    x = self._relu(x)
    x = self._conv(x, 3, out_filter // 4, 1, is_atrous=True)
    x = self._batch_norm(x)
    x = self._relu(x)
    x = self._conv(x, 1, out_filter, 1, is_atrous=True)
    if in_filter != out_filter:
      # Plan B: use a 1x1 convolution on the shortcut connection.
      orig_x = self._conv(orig_x, 1, out_filter, stride, is_atrous=True)
    x = tf.add(x, orig_x)
    tf.logging.info('image after unit %s: %s', name_scope, x.get_shape())
    return x

def _conv(self, x, kernel_size, filters, strides, is_atrous=False):
  """Convolution."""
  padding = 'SAME'
  if not is_atrous and strides > 1:
    # When stride > 1, pad explicitly and use VALID padding so the amount of
    # padding is independent of the input size.
    pad = kernel_size - 1
    pad_beg = pad // 2
    pad_end = pad - pad_beg
    if self._data_format == 'channels_first':
      x = tf.pad(x, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])
    else:
      x = tf.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    padding = 'VALID'
  return tf.layers.conv2d(inputs=x, kernel_size=kernel_size, filters=filters, strides=strides, padding=padding, use_bias=False, data_format=self._data_format)

def __init__(self, batch_size, every_n_steps=100, every_n_secs=None):
  """Initializer for ExamplesPerSecondHook.

  Args:
    batch_size: Total batch size used to calculate examples/second from
      global time.
    every_n_steps: Log stats every n steps.
    every_n_secs: Log stats every n seconds.
  """
  if (every_n_steps is None) == (every_n_secs is None):
    raise ValueError('exactly one of every_n_steps and every_n_secs should be provided.')
  self._timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=every_n_steps, every_secs=every_n_secs)
  self._step_train_time = 0
  self._total_steps = 0
  self._batch_size = batch_size

def __init__(self, worker_device, ps_devices):
  """Initializer for GpuParamServerDeviceSetter.

  Args:
    worker_device: the device to use for computation Ops.
    ps_devices: a list of devices to use for Variable Ops. Each variable is
      assigned to the least loaded device.
  """
  self.ps_devices = ps_devices
  self.worker_device = worker_device
  self.ps_sizes = [0] * len(self.ps_devices)

def load(self, label_lookup_path, uid_lookup_path):
  """Loads a human readable English name for each softmax node.

  Args:
    label_lookup_path: string UID to integer node ID.
    uid_lookup_path: string UID to human-readable string.

  Returns:
    dict from integer node ID to human-readable string.
  """
  if not tf.gfile.Exists(uid_lookup_path):
    tf.logging.fatal('File does not exist %s', uid_lookup_path)
  if not tf.gfile.Exists(label_lookup_path):
    tf.logging.fatal('File does not exist %s', label_lookup_path)
  # Loads mapping from string UID to human-readable string.
  proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
  uid_to_human = {}
  p = re.compile(r'[n\d]*[ \S,]*')
  for line in proto_as_ascii_lines:
    parsed_items = p.findall(line)
    uid = parsed_items[0]
    human_string = parsed_items[2]
    uid_to_human[uid] = human_string
  # Loads mapping from string UID to integer node ID.
  node_id_to_uid = {}
  proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
  for line in proto_as_ascii:
    if line.startswith('  target_class:'):
      target_class = int(line.split(': ')[1])
    if line.startswith('  target_class_string:'):
      target_class_string = line.split(': ')[1]
      node_id_to_uid[target_class] = target_class_string[1:-2]
  # Loads the final mapping of integer node ID to human-readable string.
  node_id_to_name = {}
  for key, val in node_id_to_uid.items():
    if val not in uid_to_human:
      tf.logging.fatal('Failed to locate: %s', val)
    name = uid_to_human[val]
    node_id_to_name[key] = name
  return node_id_to_name

def __init__(self, config, mode, train_inception=False):
  """Basic setup.

  Args:
    config: Object containing configuration parameters.
    mode: "train", "eval" or "inference".
    train_inception: Whether the inception submodel variables are trainable.
  """
  assert mode in ['train', 'eval', 'inference']
  self.config = config
  self.mode = mode
  self.train_inception = train_inception
  # Reader for the input data.
  self.reader = tf.TFRecordReader()
  # To match the "Show and Tell" paper we initialize all variables with a
  # random uniform initializer.
  self.initializer = tf.random_uniform_initializer(minval=-self.config.initializer_scale, maxval=self.config.initializer_scale)
  # A float32 Tensor with shape [batch_size, height, width, channels].
  self.images = None
  # An int32 Tensor with shape [batch_size, padded_length].
  self.input_seqs = None
  # An int32 Tensor with shape [batch_size, padded_length].
  self.target_seqs = None
  # An int32 0/1 Tensor with shape [batch_size, padded_length].
  self.input_mask = None
  # A float32 Tensor with shape [batch_size, embedding_size].
  self.image_embeddings = None
  # A float32 Tensor with shape [batch_size, padded_length, embedding_size].
  self.seq_embeddings = None
  # A float32 scalar Tensor; the total loss for the trainer to optimize.
  self.total_loss = None
  # A float32 Tensor with shape [batch_size * padded_length].
  self.target_cross_entropy_losses = None
  # A float32 Tensor with shape [batch_size * padded_length].
  self.target_cross_entropy_loss_weights = None
  # Collection of variables from the inception submodel.
  self.inception_variables = []
  # Function to restore the inception submodel from checkpoint.
  self.init_fn = None
  # Global step Tensor.
  self.global_step = None

def is_training(self):
  """Returns true if the model is built for training mode."""
  return self.mode == 'train'

def process_image(self, encoded_image, thread_id=0):
  """Decodes and processes an image string.

  Args:
    encoded_image: A scalar string Tensor; the encoded image.
    thread_id: Preprocessing thread id used to select the ordering of color
      distortions.

  Returns:
    A float32 Tensor of shape [height, width, 3]; the processed image.
  """
  return image_processing.process_image(encoded_image, is_training=self.is_training(), height=self.config.image_height, width=self.config.image_width, thread_id=thread_id, image_format=self.config.image_format)

def build_inputs(self):
  """Input prefetching, preprocessing and batching.

  Outputs:
    self.images
    self.input_seqs
    self.target_seqs (training and eval only)
    self.input_mask (training and eval only)
  """
  if self.mode == 'inference':
    # In inference mode, images and inputs are fed via placeholders.
    image_feed = tf.placeholder(dtype=tf.string, shape=[], name='image_feed')
    input_feed = tf.placeholder(dtype=tf.int64, shape=[None], name='input_feed')
    # Process image and insert batch dimensions.
    images = tf.expand_dims(self.process_image(image_feed), 0)
    input_seqs = tf.expand_dims(input_feed, 1)
    # No target sequences or input mask in inference mode.
    target_seqs = None
    input_mask = None
  else:
    # Prefetch serialized SequenceExample protos.
    input_queue = input_ops.prefetch_input_data(self.reader, self.config.input_file_pattern, is_training=self.is_training(), batch_size=self.config.batch_size, values_per_shard=self.config.values_per_input_shard, input_queue_capacity_factor=self.config.input_queue_capacity_factor, num_reader_threads=self.config.num_input_reader_threads)
    # Image processing and random distortion. Split across multiple threads
    # with each thread applying a slightly different distortion.
    assert self.config.num_preprocess_threads % 2 == 0
    images_and_captions = []
    for thread_id in range(self.config.num_preprocess_threads):
      serialized_sequence_example = input_queue.dequeue()
      encoded_image, caption = input_ops.parse_sequence_example(serialized_sequence_example, image_feature=self.config.image_feature_name, caption_feature=self.config.caption_feature_name)
      image = self.process_image(encoded_image, thread_id=thread_id)
      images_and_captions.append([image, caption])
    # Batch inputs.
    queue_capacity = 2 * self.config.num_preprocess_threads * self.config.batch_size
    images, input_seqs, target_seqs, input_mask = input_ops.batch_with_dynamic_pad(images_and_captions, batch_size=self.config.batch_size, queue_capacity=queue_capacity)
  self.images = images
  self.input_seqs = input_seqs
  self.target_seqs = target_seqs
  self.input_mask = input_mask

def build_image_embeddings(self):
  """Builds the image model subgraph and generates image embeddings.

  Inputs:
    self.images

  Outputs:
    self.image_embeddings
  """
  inception_output = image_embedding.inception_v3(self.images, trainable=self.train_inception, is_training=self.is_training())
  self.inception_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV3')
  # Map inception output into the embedding space.
  with tf.variable_scope('image_embedding') as scope:
    image_embeddings = tf.contrib.layers.fully_connected(inputs=inception_output, num_outputs=self.config.embedding_size, activation_fn=None, weights_initializer=self.initializer, biases_initializer=None, scope=scope)
  # Save the embedding size in the graph.
  tf.constant(self.config.embedding_size, name='embedding_size')
  self.image_embeddings = image_embeddings

def build_seq_embeddings(self):
  """Builds the input sequence embeddings.

  Inputs:
    self.input_seqs

  Outputs:
    self.seq_embeddings
  """
  with tf.variable_scope('seq_embedding'), tf.device('/cpu:0'):
    embedding_map = tf.get_variable(name='map', shape=[self.config.vocab_size, self.config.embedding_size], initializer=self.initializer)
    seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs)
  self.seq_embeddings = seq_embeddings

def build_model(self):
  """Builds the model.

  Inputs:
    self.image_embeddings
    self.seq_embeddings
    self.target_seqs (training and eval only)
    self.input_mask (training and eval only)

  Outputs:
    self.total_loss (training and eval only)
    self.target_cross_entropy_losses (training and eval only)
    self.target_cross_entropy_loss_weights (training and eval only)
  """
  lstm_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.config.num_lstm_units, state_is_tuple=True)
  if self.mode == 'train':
    lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell, input_keep_prob=self.config.lstm_dropout_keep_prob, output_keep_prob=self.config.lstm_dropout_keep_prob)
  with tf.variable_scope('lstm', initializer=self.initializer) as lstm_scope:
    # Feed the image embeddings to set the initial LSTM state.
    zero_state = lstm_cell.zero_state(batch_size=self.image_embeddings.get_shape()[0], dtype=tf.float32)
    _, initial_state = lstm_cell(self.image_embeddings, zero_state)
    # Allow the LSTM variables to be reused.
    lstm_scope.reuse_variables()
    if self.mode == 'inference':
      # In inference mode, use concatenated states for convenient feeding and
      # fetching.
      tf.concat(axis=1, values=initial_state, name='initial_state')
      # Placeholder for feeding a batch of concatenated states.
      state_feed = tf.placeholder(dtype=tf.float32, shape=[None, sum(lstm_cell.state_size)], name='state_feed')
      state_tuple = tf.split(value=state_feed, num_or_size_splits=2, axis=1)
      # Run a single LSTM step.
      lstm_outputs, state_tuple = lstm_cell(inputs=tf.squeeze(self.seq_embeddings, axis=[1]), state=state_tuple)
      # Concatenate the resulting state.
      tf.concat(axis=1, values=state_tuple, name='state')
    else:
      # Run the batch of sequence embeddings through the LSTM.
      sequence_length = tf.reduce_sum(self.input_mask, 1)
      lstm_outputs, _ = tf.nn.dynamic_rnn(cell=lstm_cell, inputs=self.seq_embeddings, sequence_length=sequence_length, initial_state=initial_state, dtype=tf.float32, scope=lstm_scope)
  # Stack batches vertically.
  lstm_outputs = tf.reshape(lstm_outputs, [-1, lstm_cell.output_size])
  with tf.variable_scope('logits') as logits_scope:
    logits = tf.contrib.layers.fully_connected(inputs=lstm_outputs, num_outputs=self.config.vocab_size, activation_fn=None, weights_initializer=self.initializer, scope=logits_scope)
  if self.mode == 'inference':
    tf.nn.softmax(logits, name='softmax')
  else:
    targets = tf.reshape(self.target_seqs, [-1])
    weights = tf.to_float(tf.reshape(self.input_mask, [-1]))
    # Compute losses.
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=logits)
    batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)), tf.reduce_sum(weights), name='batch_loss')
    tf.losses.add_loss(batch_loss)
    total_loss = tf.losses.get_total_loss()
    # Add summaries.
    tf.summary.scalar('losses/batch_loss', batch_loss)
    tf.summary.scalar('losses/total_loss', total_loss)
    for var in tf.trainable_variables():
      tf.summary.histogram('parameters/' + var.op.name, var)
    self.total_loss = total_loss
    self.target_cross_entropy_losses = losses  # Used in evaluation.
    self.target_cross_entropy_loss_weights = weights  # Used in evaluation.

def setup_inception_initializer(self):
  """Sets up the function to restore inception variables from checkpoint."""
  if self.mode != 'inference':
    # Restore inception variables only.
    saver = tf.train.Saver(self.inception_variables)

    def restore_fn(sess):
      tf.logging.info('Restoring Inception variables from checkpoint file %s', self.config.inception_checkpoint_file)
      saver.restore(sess, self.config.inception_checkpoint_file)

    self.init_fn = restore_fn

def setup_global_step(self):
  """Sets up the global step Tensor."""
  global_step = tf.Variable(initial_value=0, name='global_step', trainable=False, collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
  self.global_step = global_step

def build(self):
  """Creates all ops for training and evaluation."""
  self.build_inputs()
  self.build_image_embeddings()
  self.build_seq_embeddings()
  self.build_model()
  self.setup_inception_initializer()
  self.setup_global_step()

def _countInceptionParameters(self):
  """Counts the number of parameters in the inception model at top scope."""
  counter = {}
  for v in tf.global_variables():
    name_tokens = v.op.name.split('/')
    if name_tokens[0] == 'InceptionV3':
      name = 'InceptionV3/' + name_tokens[1]
      num_params = v.get_shape().num_elements()
      assert num_params
      counter[name] = counter.get(name, 0) + num_params
  return counter

def _verifyParameterCounts(self):
  """Verifies the number of parameters in the inception model."""
  param_counts = self._countInceptionParameters()
  expected_param_counts = {
      'InceptionV3/Conv2d_1a_3x3': 960,
      'InceptionV3/Conv2d_2a_3x3': 9312,
      'InceptionV3/Conv2d_2b_3x3': 18624,
      'InceptionV3/Conv2d_3b_1x1': 5360,
      'InceptionV3/Conv2d_4a_3x3': 138816,
      'InceptionV3/Mixed_5b': 256368,
      'InceptionV3/Mixed_5c': 277968,
      'InceptionV3/Mixed_5d': 285648,
      'InceptionV3/Mixed_6a': 1153920,
      'InceptionV3/Mixed_6b': 1298944,
      'InceptionV3/Mixed_6c': 1692736,
      'InceptionV3/Mixed_6d': 1692736,
      'InceptionV3/Mixed_6e': 2143872,
      'InceptionV3/Mixed_7a': 1699584,
      'InceptionV3/Mixed_7b': 5047872,
      'InceptionV3/Mixed_7c': 6080064,
  }
  self.assertDictEqual(expected_param_counts, param_counts)

def _countModelParameters(self):
  """Counts the number of parameters in the model at top level scope."""
  counter = {}
  for v in tf.global_variables():
    name = v.op.name.split('/')[0]
    num_params = v.get_shape().num_elements()
    assert num_params
    counter[name] = counter.get(name, 0) + num_params
  return counter

def _checkModelParameters(self):
  """Verifies the number of parameters in the model."""
  param_counts = self._countModelParameters()
  expected_param_counts = {
      'InceptionV3': 21802784,
      'image_embedding': 1048576,
      'seq_embedding': 6144000,
      'lstm': 2099200,
      'logits': 6156000,
      'global_step': 1,
  }
  self.assertDictEqual(expected_param_counts, param_counts)

def _checkOutputs(self, expected_shapes, feed_dict=None):
  """Verifies that the model produces expected outputs.

  Args:
    expected_shapes: A dict mapping Tensor or Tensor name to expected output
      shape.
    feed_dict: Values of Tensors to feed into Session.run().
  """
  fetches = list(expected_shapes.keys())  # Materialize so fetches is indexable.
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    outputs = sess.run(fetches, feed_dict)
  for index, output in enumerate(outputs):
    tensor = fetches[index]
    expected = expected_shapes[tensor]
    actual = output.shape
    if expected != actual:
      self.fail('Tensor %s has shape %s (expected %s).' % (tensor, actual, expected))

def __init__(self, sentence, state, logprob, score, metadata=None):
  """Initializes the Caption.

  Args:
    sentence: List of word ids in the caption.
    state: Model state after generating the previous word.
    logprob: Log-probability of the caption.
    score: Score of the caption.
    metadata: Optional metadata associated with the partial sentence. If not
      None, a list of strings with the same length as 'sentence'.
  """
  self.sentence = sentence
  self.state = state
  self.logprob = logprob
  self.score = score
  self.metadata = metadata

def __cmp__(self, other):
  """Compares Captions by score."""
  assert isinstance(other, Caption)
  if self.score == other.score:
    return 0
  elif self.score < other.score:
    return -1
  else:
    return 1

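# `__cmp__` is only honored by Python 2. Under Python 3, and for use with
# heapq (which requires `__lt__`), equivalent rich comparisons would be:
def __lt__(self, other):
  assert isinstance(other, Caption)
  return self.score < other.score

def __eq__(self, other):
  assert isinstance(other, Caption)
  return self.score == other.score
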
def push(self, x):
  """Pushes a new element."""
  assert self._data is not None
  if len(self._data) < self._n:
    heapq.heappush(self._data, x)
  else:
    heapq.heappushpop(self._data, x)

def extract(self, sort=False):
  """Extracts all elements from the TopN. This is a destructive operation.

  The only method that can be called immediately after extract() is reset().

  Args:
    sort: Whether to return the elements in descending sorted order.

  Returns:
    A list of data; the top n elements provided to the set.
  """
  assert self._data is not None
  data = self._data
  self._data = None
  if sort:
    data.sort(reverse=True)
  return data

def reset(self):
  """Returns the TopN to an empty state."""
  self._data = []

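# Together, push/extract/reset keep only the highest-scoring items. A small
# usage sketch, assuming the constructor takes the capacity n:
top = TopN(2)
for score in [0.1, 0.9, 0.5, 0.7]:
  top.push(score)
print(top.extract(sort=True))  # [0.9, 0.7]
top.reset()  # Required before pushing again after extract().
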
def __init__(self, model, vocab, beam_size=3, max_caption_length=20, length_normalization_factor=0.0):
  """Initializes the generator.

  Args:
    model: Object encapsulating a trained image-to-text model. Must have
      methods feed_image() and inference_step(). For example, an instance of
      InferenceWrapperBase.
    vocab: A Vocabulary object.
    beam_size: Beam size to use when generating captions.
    max_caption_length: The maximum caption length before stopping the search.
    length_normalization_factor: If != 0, a number x such that captions are
      scored by logprob/length^x, rather than logprob. This changes the
      relative scores of captions depending on their lengths. For example, if
      x > 0 then longer captions will be favored.
  """
  self.vocab = vocab
  self.model = model
  self.beam_size = beam_size
  self.max_caption_length = max_caption_length
  self.length_normalization_factor = length_normalization_factor

'Runs beam search caption generation on a single image.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
A list of Caption sorted by descending score.'
| def beam_search(self, sess, encoded_image):
| initial_state = self.model.feed_image(sess, encoded_image)
initial_beam = Caption(sentence=[self.vocab.start_id], state=initial_state[0], logprob=0.0, score=0.0, metadata=[''])
partial_captions = TopN(self.beam_size)
partial_captions.push(initial_beam)
complete_captions = TopN(self.beam_size)
for _ in range((self.max_caption_length - 1)):
partial_captions_list = partial_captions.extract()
partial_captions.reset()
input_feed = np.array([c.sentence[(-1)] for c in partial_captions_list])
state_feed = np.array([c.state for c in partial_captions_list])
(softmax, new_states, metadata) = self.model.inference_step(sess, input_feed, state_feed)
for (i, partial_caption) in enumerate(partial_captions_list):
word_probabilities = softmax[i]
state = new_states[i]
words_and_probs = list(enumerate(word_probabilities))
words_and_probs.sort(key=(lambda x: (- x[1])))
words_and_probs = words_and_probs[0:self.beam_size]
for (w, p) in words_and_probs:
if (p < 1e-12):
continue
sentence = (partial_caption.sentence + [w])
logprob = (partial_caption.logprob + math.log(p))
score = logprob
if metadata:
metadata_list = (partial_caption.metadata + [metadata[i]])
else:
metadata_list = None
if (w == self.vocab.end_id):
if (self.length_normalization_factor > 0):
score /= (len(sentence) ** self.length_normalization_factor)
beam = Caption(sentence, state, logprob, score, metadata_list)
complete_captions.push(beam)
else:
beam = Caption(sentence, state, logprob, score, metadata_list)
partial_captions.push(beam)
if (partial_captions.size() == 0):
break
if (not complete_captions.size()):
complete_captions = partial_captions
return complete_captions.extract(sort=True)
|
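A hedged end-to-end sketch of driving beam_search; `model`, `restore_fn`, and `g` are assumed to come from the inference wrapper defined later in this section, and `vocab` from the Vocabulary class:

```python
import math
import tensorflow as tf

# Illustrative usage; the image path is an assumption.
generator = caption_generator.CaptionGenerator(model, vocab, beam_size=3)
with tf.Session(graph=g) as sess:
    restore_fn(sess)
    with tf.gfile.GFile('/path/to/image.jpg', 'rb') as f:
        captions = generator.beam_search(sess, f.read())
    for c in captions:
        words = [vocab.id_to_word(w) for w in c.sentence[1:-1]]  # strip <S>, </S>
        print('%s (p=%f)' % (' '.join(words), math.exp(c.logprob)))
```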
'Builds the model for inference.
Args:
model_config: Object containing configuration for building the model.
Returns:
model: The model object.'
| def build_model(self, model_config):
| tf.logging.fatal('Please implement build_model in subclass')
|
'Creates a function that restores a model from checkpoint.
Args:
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
saver: Saver for restoring variables from the checkpoint file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.
Raises:
ValueError: If checkpoint_path does not refer to a checkpoint file or a
directory containing a checkpoint file.'
| def _create_restore_fn(self, checkpoint_path, saver):
| if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
if (not checkpoint_path):
raise ValueError(('No checkpoint file found in: %s' % checkpoint_path))
def _restore_fn(sess):
tf.logging.info('Loading model from checkpoint: %s', checkpoint_path)
saver.restore(sess, checkpoint_path)
tf.logging.info('Successfully loaded checkpoint: %s', os.path.basename(checkpoint_path))
return _restore_fn
|
'Builds the inference graph from a configuration object.
Args:
model_config: Object containing configuration for building the model.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.'
| def build_graph_from_config(self, model_config, checkpoint_path):
| tf.logging.info('Building model.')
self.build_model(model_config)
saver = tf.train.Saver()
return self._create_restore_fn(checkpoint_path, saver)
|
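The typical calling pattern, sketched under the assumption that `InferenceWrapper` is a concrete subclass implementing build_model(), feed_image(), and inference_step():

```python
import tensorflow as tf

# Sketch: build the inference graph once, freeze it, then restore weights.
g = tf.Graph()
with g.as_default():
    model = InferenceWrapper()  # assumed concrete subclass
    restore_fn = model.build_graph_from_config(model_config, '/path/to/ckpt_dir')
g.finalize()  # graph is read-only from here on
with tf.Session(graph=g) as sess:
    restore_fn(sess)
```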
'Builds the inference graph from serialized GraphDef and SaverDef protos.
Args:
graph_def_file: File containing a serialized GraphDef proto.
saver_def_file: File containing a serialized SaverDef proto.
checkpoint_path: Checkpoint file or a directory containing a checkpoint
file.
Returns:
restore_fn: A function such that restore_fn(sess) loads model variables
from the checkpoint file.'
| def build_graph_from_proto(self, graph_def_file, saver_def_file, checkpoint_path):
| tf.logging.info('Loading GraphDef from file: %s', graph_def_file)
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(graph_def_file, 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
tf.logging.info('Loading SaverDef from file: %s', saver_def_file)
saver_def = tf.train.SaverDef()
with tf.gfile.FastGFile(saver_def_file, 'rb') as f:
saver_def.ParseFromString(f.read())
saver = tf.train.Saver(saver_def=saver_def)
return self._create_restore_fn(checkpoint_path, saver)
|
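For completeness, one hedged way to produce the serialized protos this method consumes (file names are illustrative assumptions):

```python
import tensorflow as tf

# Sketch: serialize the GraphDef and SaverDef from a built model graph.
with tf.Graph().as_default():
    model.build_model(model_config)
    saver = tf.train.Saver()
    with tf.gfile.FastGFile('graph_def.pb', 'wb') as f:
        f.write(tf.get_default_graph().as_graph_def().SerializeToString())
    with tf.gfile.FastGFile('saver_def.pb', 'wb') as f:
        f.write(saver.as_saver_def().SerializeToString())
```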
'Feeds an image and returns the initial model state.
See comments at the top of file.
Args:
sess: TensorFlow Session object.
encoded_image: An encoded image string.
Returns:
state: A numpy array of shape [1, state_size].'
| def feed_image(self, sess, encoded_image):
| tf.logging.fatal('Please implement feed_image in subclass')
|
'Runs one step of inference.
Args:
sess: TensorFlow Session object.
input_feed: A numpy array of shape [batch_size].
state_feed: A numpy array of shape [batch_size, state_size].
Returns:
softmax_output: A numpy array of shape [batch_size, vocab_size].
new_state: A numpy array of shape [batch_size, state_size].
metadata: Optional. If not None, a string containing metadata about the
 current inference step (e.g. a serialized numpy array containing
 activations from a particular model layer).'
| def inference_step(self, sess, input_feed, state_feed):
| tf.logging.fatal('Please implement inference_step in subclass')
|
'Tests that beam search generates the expected captions.
Args:
expected_captions: A sequence of pairs (sentence, probability), where
sentence is a list of integer ids and probability is a float in [0, 1].
beam_size: Parameter passed to beam_search().
max_caption_length: Parameter passed to beam_search().
length_normalization_factor: Parameter passed to beam_search().'
| def _assertExpectedCaptions(self, expected_captions, beam_size=3, max_caption_length=20, length_normalization_factor=0):
| expected_sentences = [c[0] for c in expected_captions]
expected_probabilities = [c[1] for c in expected_captions]
generator = caption_generator.CaptionGenerator(model=FakeModel(), vocab=FakeVocab(), beam_size=beam_size, max_caption_length=max_caption_length, length_normalization_factor=length_normalization_factor)
actual_captions = generator.beam_search(sess=None, encoded_image=None)
actual_sentences = [c.sentence for c in actual_captions]
actual_probabilities = [math.exp(c.logprob) for c in actual_captions]
self.assertEqual(expected_sentences, actual_sentences)
self.assertAllClose(expected_probabilities, actual_probabilities)
|
'Initializes the vocabulary.
Args:
vocab_file: File containing the vocabulary, where the words are the first
whitespace-separated token on each line (other tokens are ignored) and
the word ids are the corresponding line numbers.
start_word: Special word denoting sentence start.
end_word: Special word denoting sentence end.
unk_word: Special word denoting unknown words.'
| def __init__(self, vocab_file, start_word='<S>', end_word='</S>', unk_word='<UNK>'):
| if (not tf.gfile.Exists(vocab_file)):
tf.logging.fatal('Vocab file %s not found.', vocab_file)
tf.logging.info('Initializing vocabulary from file: %s', vocab_file)
with tf.gfile.GFile(vocab_file, mode='r') as f:
reverse_vocab = list(f.readlines())
reverse_vocab = [line.split()[0] for line in reverse_vocab]
assert (start_word in reverse_vocab)
assert (end_word in reverse_vocab)
if (unk_word not in reverse_vocab):
reverse_vocab.append(unk_word)
vocab = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
tf.logging.info(('Created vocabulary with %d words' % len(vocab)))
self.vocab = vocab
self.reverse_vocab = reverse_vocab
self.start_id = vocab[start_word]
self.end_id = vocab[end_word]
self.unk_id = vocab[unk_word]
|
'Returns the integer word id of a word string.'
| def word_to_id(self, word):
| if (word in self.vocab):
return self.vocab[word]
else:
return self.unk_id
|
'Returns the word string of an integer word id.'
| def id_to_word(self, word_id):
| if (word_id >= len(self.reverse_vocab)):
return self.reverse_vocab[self.unk_id]
else:
return self.reverse_vocab[word_id]
|
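An illustrative round trip through the vocabulary (the file path is an assumption):

```python
vocab = Vocabulary('/path/to/word_counts.txt')
ids = [vocab.word_to_id(w) for w in ['a', 'dog', 'qwertyuiop']]
words = [vocab.id_to_word(i) for i in ids]  # out-of-vocabulary words map to <UNK>
```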
'Sets the default model hyperparameters.'
| def __init__(self):
| self.input_file_pattern = None
self.image_format = 'jpeg'
self.values_per_input_shard = 2300
self.input_queue_capacity_factor = 2
self.num_input_reader_threads = 1
self.image_feature_name = 'image/data'
self.caption_feature_name = 'image/caption_ids'
self.vocab_size = 12000
self.num_preprocess_threads = 4
self.batch_size = 32
self.inception_checkpoint_file = None
self.image_height = 299
self.image_width = 299
self.initializer_scale = 0.08
self.embedding_size = 512
self.num_lstm_units = 512
self.lstm_dropout_keep_prob = 0.7
|
'Sets the default training hyperparameters.'
| def __init__(self):
| self.num_examples_per_epoch = 586363
self.optimizer = 'SGD'
self.initial_learning_rate = 2.0
self.learning_rate_decay_factor = 0.5
self.num_epochs_per_decay = 8.0
self.train_inception_learning_rate = 0.0005
self.clip_gradients = 5.0
self.max_checkpoints_to_keep = 5
|
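A sketch of how these hyperparameters typically combine into a learning-rate schedule; `global_step` and the two config objects are assumed to be in scope:

```python
import tensorflow as tf

# Hedged sketch: exponential decay driven by the config values above.
num_batches_per_epoch = (training_config.num_examples_per_epoch / model_config.batch_size)
decay_steps = int(num_batches_per_epoch * training_config.num_epochs_per_decay)
learning_rate = tf.train.exponential_decay(
    learning_rate=training_config.initial_learning_rate,
    global_step=global_step,
    decay_steps=decay_steps,
    decay_rate=training_config.learning_rate_decay_factor,
    staircase=True)
```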
'Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
 unk_id: Id of the special 'unknown' word.'
| def __init__(self, vocab, unk_id):
| self._vocab = vocab
self._unk_id = unk_id
|
'Returns the integer id of a word string.'
| def word_to_id(self, word):
| if (word in self._vocab):
return self._vocab[word]
else:
return self._unk_id
|
'Embeds x using a standard CNN architecture.
Args:
x: Batch of images as a 2-d Tensor [batch_size, -1].
Returns:
A 2-d Tensor [batch_size, hidden_dim] of embedded images.'
| def core_builder(self, x):
| ch1 = (32 * 2)
ch2 = (64 * 2)
conv1_weights = tf.get_variable('conv1_w', [3, 3, self.num_channels, ch1], initializer=self.matrix_init)
conv1_biases = tf.get_variable('conv1_b', [ch1], initializer=self.vector_init)
conv1a_weights = tf.get_variable('conv1a_w', [3, 3, ch1, ch1], initializer=self.matrix_init)
conv1a_biases = tf.get_variable('conv1a_b', [ch1], initializer=self.vector_init)
conv2_weights = tf.get_variable('conv2_w', [3, 3, ch1, ch2], initializer=self.matrix_init)
conv2_biases = tf.get_variable('conv2_b', [ch2], initializer=self.vector_init)
conv2a_weights = tf.get_variable('conv2a_w', [3, 3, ch2, ch2], initializer=self.matrix_init)
conv2a_biases = tf.get_variable('conv2a_b', [ch2], initializer=self.vector_init)
fc1_weights = tf.get_variable('fc1_w', [((((self.image_size // 4) * self.image_size) // 4) * ch2), self.hidden_dim], initializer=self.matrix_init)
fc1_biases = tf.get_variable('fc1_b', [self.hidden_dim], initializer=self.vector_init)
x = tf.reshape(x, [(-1), self.image_size, self.image_size, self.num_channels])
batch_size = tf.shape(x)[0]
conv1 = tf.nn.conv2d(x, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
conv1 = tf.nn.conv2d(relu1, conv1a_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1a_biases))
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
conv2 = tf.nn.conv2d(relu2, conv2a_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2a_biases))
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
reshape = tf.reshape(pool2, [batch_size, (-1)])
hidden = (tf.matmul(reshape, fc1_weights) + fc1_biases)
return hidden
|
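The fc1 weight rows `((image_size // 4) * image_size // 4) * ch2` encode the two stride-2 max-pools; a worked shape trace assuming image_size = 28 and num_channels = 1:

```python
# Worked trace (assumed inputs; each conv is 3x3, stride 1, SAME padding):
#   input          [batch, 28, 28, 1]
#   conv1/conv1a   [batch, 28, 28, 64]
#   pool1          [batch, 14, 14, 64]   (2x2 max-pool, stride 2)
#   conv2/conv2a   [batch, 14, 14, 128]
#   pool2          [batch, 7, 7, 128]
#   reshape        [batch, 7 * 7 * 128] = [batch, 6272]
assert ((28 // 4) * 28 // 4) * 128 == 7 * 7 * 128  # fc1 rows match the flatten
```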
'Sets up all components of the computation graph.'
| def setup(self):
| (self.x, self.y) = self.get_xy_placeholders()
with tf.variable_scope('core', reuse=None):
(self.loss, self.gradient_ops) = self.train(self.x, self.y)
with tf.variable_scope('core', reuse=True):
self.y_preds = self.eval(self.x, self.y)
(self.mem_keys, self.mem_vals, self.mem_age, self.recent_idx) = self.memory.get()
self.mem_keys_reset = tf.placeholder(self.mem_keys.dtype, tf.identity(self.mem_keys).shape)
self.mem_vals_reset = tf.placeholder(self.mem_vals.dtype, tf.identity(self.mem_vals).shape)
self.mem_age_reset = tf.placeholder(self.mem_age.dtype, tf.identity(self.mem_age).shape)
self.recent_idx_reset = tf.placeholder(self.recent_idx.dtype, tf.identity(self.recent_idx).shape)
self.mem_reset_op = self.memory.set(self.mem_keys_reset, self.mem_vals_reset, self.mem_age_reset, None)
|
'Performs training steps on episodic input.
Args:
sess: A Tensorflow Session.
x: A list of batches of images defining the episode.
y: A list of batches of labels corresponding to x.
clear_memory: Whether to clear the memory before the episode.
Returns:
List of losses the same length as the episode.'
| def episode_step(self, sess, x, y, clear_memory=False):
| outputs = [self.loss, self.gradient_ops]
if clear_memory:
self.clear_memory(sess)
losses = []
for (xx, yy) in zip(x, y):
out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy})
loss = out[0]
losses.append(loss)
return losses
|
'Predicts the labels on a single batch of examples.
Args:
sess: A Tensorflow Session.
x: A batch of images.
 y: The labels for the images in x. This allows for updating the memory.
Returns:
Predicted y.'
| def predict(self, sess, x, y=None):
| cur_memory = sess.run([self.mem_keys, self.mem_vals, self.mem_age])
outputs = [self.y_preds]
if (y is None):
ret = sess.run(outputs, feed_dict={self.x: x})
else:
ret = sess.run(outputs, feed_dict={self.x: x, self.y: y})
sess.run([self.mem_reset_op], feed_dict={self.mem_keys_reset: cur_memory[0], self.mem_vals_reset: cur_memory[1], self.mem_age_reset: cur_memory[2]})
return ret
|
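Condensed, the save/run/rollback pattern that keeps evaluation from contaminating the episodic memory looks like this (a restatement of the body above, not new behavior):

```python
# 1. snapshot memory, 2. run the forward pass (which may write memory),
# 3. feed the snapshot back through the reset placeholders.
snapshot = sess.run([self.mem_keys, self.mem_vals, self.mem_age])
preds = sess.run([self.y_preds], feed_dict={self.x: x, self.y: y})
sess.run([self.mem_reset_op], feed_dict={self.mem_keys_reset: snapshot[0],
                                         self.mem_vals_reset: snapshot[1],
                                         self.mem_age_reset: snapshot[2]})
```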
'Predicts the labels on an episode of examples.
Args:
sess: A Tensorflow Session.
x: A list of batches of images.
 y: A list of labels for the images in x. This allows for updating the memory.
clear_memory: Whether to clear the memory before the episode.
Returns:
List of predicted y.'
| def episode_predict(self, sess, x, y, clear_memory=False):
| cur_memory = sess.run([self.mem_keys, self.mem_vals, self.mem_age])
if clear_memory:
self.clear_memory(sess)
outputs = [self.y_preds]
y_preds = []
for (xx, yy) in zip(x, y):
out = sess.run(outputs, feed_dict={self.x: xx, self.y: yy})
y_pred = out[0]
y_preds.append(y_pred)
sess.run([self.mem_reset_op], feed_dict={self.mem_keys_reset: cur_memory[0], self.mem_vals_reset: cur_memory[1], self.mem_age_reset: cur_memory[2]})
return y_preds
|
'Generates a random batch for training or validation.
 Structures each element of the batch as an 'episode'.
Each episode contains episode_length examples and
episode_width distinct labels.
Args:
data: A dictionary mapping label to list of examples.
episode_length: Number of examples in each episode.
 episode_width: Number of distinct labels in each episode.
batch_size: Batch size (number of episodes).
Returns:
A tuple (x, y) where x is a list of batches of examples
with size episode_length and y is a list of batches of labels.'
| def sample_episode_batch(self, data, episode_length, episode_width, batch_size):
| episodes_x = [[] for _ in xrange(episode_length)]
episodes_y = [[] for _ in xrange(episode_length)]
assert (len(data) >= episode_width)
keys = data.keys()
for b in xrange(batch_size):
episode_labels = random.sample(keys, episode_width)
remainder = (episode_length % episode_width)
remainders = (([0] * (episode_width - remainder)) + ([1] * remainder))
episode_x = [random.sample(data[lab], (r + ((episode_length - remainder) / episode_width))) for (lab, r) in zip(episode_labels, remainders)]
episode = sum([[(x, i, ii) for (ii, x) in enumerate(xx)] for (i, xx) in enumerate(episode_x)], [])
random.shuffle(episode)
episode.sort(key=(lambda elem: elem[2]))
assert (len(episode) == episode_length)
for i in xrange(episode_length):
episodes_x[i].append(episode[i][0])
episodes_y[i].append((episode[i][1] + (b * episode_width)))
return ([np.array(xx).astype('float32') for xx in episodes_x], [np.array(yy).astype('int32') for yy in episodes_y])
|
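A small worked example: with episode_length = 5 and episode_width = 3, remainder = 2, so remainders = [0, 1, 1] and the three sampled labels contribute 1, 2, and 2 examples respectively. A hedged invocation, where `trainer` is an instance of the surrounding class and `data` maps each label to at least 2 examples:

```python
x, y = trainer.sample_episode_batch(data, episode_length=5, episode_width=3,
                                    batch_size=16)
assert len(x) == len(y) == 5      # one batch per episode step
assert x[0].shape[0] == 16        # each step holds a batch of 16 examples
assert y[0].shape == (16,)        # labels lie in [0, 16 * 3)
```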
'Performs training.
Trains a model using episodic training.
 Periodically runs evaluations on validation data.'
| def run(self):
| (train_data, valid_data) = (self.train_data, self.valid_data)
(input_dim, output_dim) = (self.input_dim, self.output_dim)
(rep_dim, episode_length) = (self.rep_dim, self.episode_length)
(episode_width, memory_size) = (self.episode_width, self.memory_size)
batch_size = self.batch_size
train_size = len(train_data)
valid_size = len(valid_data)
logging.info('train_size (number of labels) %d', train_size)
logging.info('valid_size (number of labels) %d', valid_size)
logging.info('input_dim %d', input_dim)
logging.info('output_dim %d', output_dim)
logging.info('rep_dim %d', rep_dim)
logging.info('episode_length %d', episode_length)
logging.info('episode_width %d', episode_width)
logging.info('memory_size %d', memory_size)
logging.info('batch_size %d', batch_size)
assert all(((len(v) >= (float(episode_length) / episode_width)) for v in train_data.itervalues()))
assert all(((len(v) >= (float(episode_length) / episode_width)) for v in valid_data.itervalues()))
output_dim = episode_width
self.model = self.get_model()
self.model.setup()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(max_to_keep=10)
ckpt = None
if FLAGS.save_dir:
ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
if (ckpt and ckpt.model_checkpoint_path):
logging.info('restoring from %s', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
logging.info('starting now')
losses = []
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
for i in xrange(FLAGS.num_episodes):
(x, y) = self.sample_episode_batch(train_data, episode_length, episode_width, batch_size)
outputs = self.model.episode_step(sess, x, y, clear_memory=True)
loss = outputs
losses.append(loss)
if ((i % FLAGS.validation_frequency) == 0):
logging.info('episode batch %d, avg train loss %f', i, np.mean(losses))
losses = []
correct = []
correct_by_shot = dict(((k, []) for k in xrange((self.episode_width + 1))))
for _ in xrange(FLAGS.validation_length):
(x, y) = self.sample_episode_batch(valid_data, episode_length, episode_width, 1)
outputs = self.model.episode_predict(sess, x, y, clear_memory=True)
y_preds = outputs
correct.append(self.compute_correct(np.array(y), y_preds))
seen_counts = [([0] * episode_width) for _ in xrange(batch_size)]
for (yy, yy_preds) in zip(y, y_preds):
for (k, (yyy, yyy_preds)) in enumerate(zip(yy, yy_preds)):
(yyy, yyy_preds) = (int(yyy), int(yyy_preds))
count = seen_counts[k][(yyy % self.episode_width)]
if (count in correct_by_shot):
correct_by_shot[count].append(self.individual_compute_correct(yyy, yyy_preds))
seen_counts[k][(yyy % self.episode_width)] = (count + 1)
logging.info('validation overall accuracy %f', np.mean(correct))
logging.info(('%d-shot: %.3f, ' * (self.episode_width + 1)), *sum([[k, np.mean(correct_by_shot[k])] for k in xrange((self.episode_width + 1))], []))
if (saver and FLAGS.save_dir):
saved_file = saver.save(sess, os.path.join(FLAGS.save_dir, 'model.ckpt'), global_step=self.model.global_step)
logging.info('saved model to %s', saved_file)
|
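The correct_by_shot bookkeeping above groups each prediction by how many times its label was already seen in the episode, which yields the '%d-shot' accuracies in the log line. A tiny illustration of the counting (values are made up):

```python
# Labels arriving in episode order: the first 7 is a 0-shot prediction,
# the second 7 is 1-shot, and 3 is again 0-shot.
seen = {}
for label in [7, 7, 3]:
    shot = seen.get(label, 0)  # prior occurrences == shot count
    seen[label] = shot + 1
```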