desc: string, lengths 3 to 26.7k
decl: string, lengths 11 to 7.89k
bodies: string, lengths 8 to 553k
'load_word2vec: load the w2v model'
def load_word2vec(self):
print 'Start load word2vec model' self.w2vec = {} with open(self.w2v_file, 'r') as fread: for line in fread.readlines(): line_list = line.strip().split(' ') word = line_list[0] word_vec = np.fromstring(' '.join(line_list[1:]), dtype=float, sep=' ') self.w2vec[word] = word_vec print 'Done load word2vec model'
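A minimal standalone sketch of the same loading logic, assuming the embeddings are stored in the plain-text format `word v1 v2 ... vN`, one entry per line (the file path is a placeholder):

```python
import numpy as np

def load_text_word2vec(path):
    """Parse a plain-text embedding file into a {word: vector} dict."""
    w2vec = {}
    with open(path, 'r') as fread:
        for line in fread:
            parts = line.rstrip().split(' ')
            if len(parts) < 2:
                continue  # skip blank or malformed lines
            w2vec[parts[0]] = np.asarray(parts[1:], dtype=float)
    return w2vec

# hypothetical usage:
# vectors = load_text_word2vec('w2v.txt')
# print(len(vectors))
```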
'get all tokens from file'
def __get_all_tokens_v2(self):
print 'load the tokens from file ' with open(self.data_path.replace('all.csv', 'all_token.csv'), 'r') as fread: for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] text_token = line_list[1].split('\\') self.dictionary.add_documents([text_token]) self.labels.append(label) self.corpus.append(text_token) except BaseException as e: print e continue
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) print 'save the dictionary' self.dictionary.save(self.data_path.replace('all.csv', 'cnn.dict')) self.num_data = i fwrite.close()
'gen_embedding_matrix: generate the embedding matrix'
def gen_embedding_matrix(self, load4file=True):
if load4file: self.__get_all_tokens_v2() else: self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens() print 'after filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.sequence = [] for file_token in self.corpus: temp_sequence = [x for (x, y) in self.dictionary.doc2bow(file_token)] self.sequence.append(temp_sequence) self.corpus_size = len(self.dictionary.token2id) self.embedding_matrix = np.zeros((self.corpus_size, EMBEDDING_DIM)) print 'corpus size: {0}'.format(len(self.dictionary.token2id)) for (key, v) in self.dictionary.token2id.items(): key_vec = self.w2vec.get(key) if (key_vec is not None): self.embedding_matrix[v] = key_vec else: self.embedding_matrix[v] = (np.random.rand(EMBEDDING_DIM) - 0.5)
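A matrix built this way is typically handed to an embedding layer as its initial weights. A hedged sketch, assuming Keras is available and using stand-in values for `vocab_size` and `EMBEDDING_DIM`:

```python
import numpy as np
from tensorflow.keras.layers import Embedding

EMBEDDING_DIM = 100   # assumption: the same dimension used when building the matrix
vocab_size = 5000     # assumption: len(dictionary.token2id)

# stand-in for the matrix produced by gen_embedding_matrix
embedding_matrix = np.random.rand(vocab_size, EMBEDDING_DIM) - 0.5

embedding_layer = Embedding(
    input_dim=vocab_size,
    output_dim=EMBEDDING_DIM,
    weights=[embedding_matrix],  # initialize from the precomputed matrix
    trainable=False,             # keep the pretrained vectors frozen
)
```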
'load_word2vec: load the w2v model'
def load_word2vec(self):
print 'Start load word2vec model' self.w2vec = {} with open(self.w2v_file, 'r') as fread: for line in fread.readlines(): line_list = line.strip().split(' ') word = line_list[0] word_vec = np.fromstring(' '.join(line_list[1:]), dtype=float, sep=' ') self.w2vec[word] = word_vec print 'Done load word2vec model'
'get all tokens from file'
def __get_all_tokens_v2(self):
print 'load the tokens from file ' with open(self.data_path.replace('all.csv', 'all_token.csv'), 'r') as fread: for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] text_token = line_list[1].split('\\') self.dictionary.add_documents([text_token]) self.labels.append(label) self.corpus.append(text_token) except BaseException as e: print e continue
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) print 'save the dictionary' self.dictionary.save(self.data_path.replace('all.csv', 'cnn.dict')) self.num_data = i fwrite.close()
'gen_embedding_matrix: generate the embedding matrix'
def gen_embedding_matrix(self, load4file=True):
if load4file: self.__get_all_tokens_v2() else: self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens() print 'after filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.sequence = [] for file_token in self.corpus: temp_sequence = [x for (x, y) in self.dictionary.doc2bow(file_token)] self.sequence.append(temp_sequence) self.corpus_size = len(self.dictionary.token2id) self.embedding_matrix = np.zeros((self.corpus_size, EMBEDDING_DIM)) print 'corpus size: {0}'.format(len(self.dictionary.token2id)) for (key, v) in self.dictionary.token2id.items(): key_vec = self.w2vec.get(key) if (key_vec is not None): self.embedding_matrix[v] = key_vec else: self.embedding_matrix[v] = (np.random.rand(EMBEDDING_DIM) - 0.5) print 'embedding_matrix len {0}'.format(len(self.embedding_matrix))
'load_word2vec: load the w2v model'
def load_word2vec(self):
print 'Start load word2vec model' self.w2vec = {} with open(self.w2v_file, 'r') as fread: for line in fread.readlines(): line_list = line.strip().split(' ') word = line_list[0] word_vec = np.fromstring(' '.join(line_list[1:]), dtype=float, sep=' ') self.w2vec[word] = word_vec print 'Done load word2vec model'
'get all tokens from file'
def __get_all_tokens_v2(self):
print 'load the tokens from file ' with open(self.data_path.replace('all_title.csv', 'all_token.csv'), 'r') as fread: for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] text_token = line_list[1].split('\\') self.dictionary.add_documents([text_token]) self.labels.append(label) self.corpus.append(text_token) except BaseException as e: print e continue
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all_title.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) print 'save the dictionary' self.dictionary.save(self.data_path.replace('all_title.csv', 'cnn.dict')) self.num_data = i fwrite.close()
'gen_embedding_matrix: generate the embedding matrix'
def gen_embedding_matrix(self, load4file=True):
if load4file: self.__get_all_tokens_v2() else: self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens() print 'after filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.sequence = [] for file_token in self.corpus: temp_sequence = [x for (x, y) in self.dictionary.doc2bow(file_token)] self.sequence.append(temp_sequence) self.corpus_size = len(self.dictionary.token2id) self.embedding_matrix = np.zeros((self.corpus_size, EMBEDDING_DIM)) print 'corpus size: {0}'.format(len(self.dictionary.token2id)) for (key, v) in self.dictionary.token2id.items(): key_vec = self.w2vec.get(key) if (key_vec is not None): self.embedding_matrix[v] = key_vec else: self.embedding_matrix[v] = (np.random.rand(EMBEDDING_DIM) - 0.5)
'load_word2vec: load the w2v model'
def load_word2vec(self):
print 'Start load word2vec model' self.w2vec = {} with open(self.w2v_file, 'r') as fread: for line in fread.readlines(): line_list = line.strip().split(' ') word = line_list[0] word_vec = np.fromstring(' '.join(line_list[1:]), dtype=float, sep=' ') self.w2vec[word] = word_vec print 'Done load word2vec model'
'get all tokens from file'
def __get_all_tokens_v2(self):
print 'load the tokens from file ' with open(self.data_path.replace('all.csv', 'all_token.csv'), 'r') as fread: for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] text_token = line_list[1].split('\\') self.dictionary.add_documents([text_token]) self.labels.append(label) self.corpus.append(text_token) except BaseException as e: print e continue
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) print 'save the dictionary' self.dictionary.save(self.data_path.replace('all.csv', 'cnn.dict')) self.num_data = i fwrite.close()
'gen_embedding_matrix: generate the embedding matrix'
def gen_embedding_matrix(self, load4file=True):
if load4file: self.__get_all_tokens_v2() else: self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens() print 'after filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.sequence = [] for file_token in self.corpus: temp_sequence = [x for (x, y) in self.dictionary.doc2bow(file_token)] self.sequence.append(temp_sequence) self.corpus_size = len(self.dictionary.token2id) self.embedding_matrix = np.zeros((self.corpus_size, EMBEDDING_DIM)) print 'corpus size: {0}'.format(len(self.dictionary.token2id)) for (key, v) in self.dictionary.token2id.items(): key_vec = self.w2vec.get(key) if (key_vec is not None): self.embedding_matrix[v] = key_vec else: self.embedding_matrix[v] = (np.random.rand(EMBEDDING_DIM) - 0.5) print 'embedding_matrix len {0}'.format(len(self.embedding_matrix))
'delete the stopwords'
def del_stopwords(self, do=True):
if do: for word in self.cut_text: if (word not in self.stop_words): self.tokens.append(word) else: for word in self.cut_text: self.tokens.append(word)
'text : String return: generator'
def cut(self, origin_text):
cut_text = jieba.cut(origin_text) self.cut_text = cut_text
'origin_text: String return: a list of tokens'
def run(self, origin_text):
self.tokens = [] self.cut(origin_text) self.del_stopwords() self.del_digit() self.del_alpha() return self.tokens
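A compact sketch of the same cut-then-filter pipeline, assuming `jieba` is installed; the stopword set is a placeholder and the digit/ASCII checks stand in for del_digit/del_alpha:

```python
import jieba

stop_words = {u'的', u'了', u'和'}   # placeholder stopword set

def is_ascii_alpha(word):
    return len(word) > 0 and all('a' <= ch.lower() <= 'z' for ch in word)

def run(origin_text):
    """Cut with jieba, then drop stopwords, pure digits and pure ASCII words."""
    tokens = []
    for word in jieba.cut(origin_text):   # jieba.cut returns a generator of tokens
        if word in stop_words or word.isdigit() or is_ascii_alpha(word):
            continue
        tokens.append(word)
    return tokens
```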
'load_word2vec: load the w2v model'
def load_word2vec(self):
print 'Start load word2vec model' self.w2vec = {} with open(self.w2v_file, 'r') as fread: for line in fread.readlines(): line_list = line.strip().split(' ') word = line_list[0] word_vec = np.fromstring(' '.join(line_list[1:]), dtype=float, sep=' ') self.w2vec[word] = word_vec print 'Done load word2vec model'
'get all tokens from file'
def __get_all_tokens_v2(self):
print 'load the tokens from file ' with open(self.data_path.replace('all.csv', 'all_token.csv'), 'r') as fread: for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] text_token = line_list[1].split('\\') self.dictionary.add_documents([text_token]) self.labels.append(label) self.corpus.append(text_token) except BaseException as e: print e continue
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(text_tokens) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) print 'save the dictionary' self.dictionary.save(self.data_path.replace('all.csv', 'cnn.dict')) self.num_data = i fwrite.close()
'gen_embedding_matrix: generate the embedding matrix'
def gen_embedding_matrix(self, load4file=True):
if load4file: self.__get_all_tokens_v2() else: self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.__filter_tokens(threshold_num=5) print 'after filter, the tokens len: {0}'.format(self.dictionary.__len__()) self.sequence = [] for file_token in self.corpus: temp_sequence = [x for (x, y) in self.dictionary.doc2bow(file_token)] self.sequence.append(temp_sequence) self.corpus_size = len(self.dictionary.token2id) self.embedding_matrix = np.zeros((self.corpus_size, EMBEDDING_DIM)) print 'corpus size: {0}'.format(len(self.dictionary.token2id)) for (key, v) in self.dictionary.token2id.items(): key_vec = self.w2vec.get(key) if (key_vec is not None): self.embedding_matrix[v] = key_vec else: self.embedding_matrix[v] = (np.random.rand(EMBEDDING_DIM) - 0.5) print 'embedding_matrix len {0}'.format(len(self.embedding_matrix))
'get all tokens of the corpus'
def __get_all_tokens(self):
fwrite = open(self.data_path.replace('all_title.csv', 'all_token.csv'), 'w') with open(self.data_path, 'r') as fread: i = 0 for line in fread.readlines(): try: line_list = line.strip().split(' DCTB ') label = line_list[0] self.labels.append(label) text = line_list[1] text_tokens = self.cut_doc_obj.run(text) self.corpus.append(' '.join(text_tokens)) self.dictionary.add_documents([text_tokens]) fwrite.write((((label + ' DCTB ') + '\\'.join(text_tokens)) + '\n')) i += 1 except BaseException as e: msg = traceback.format_exc() print msg print '=====>Read Done<======' break self.token_len = self.dictionary.__len__() print ('all token len ' + str(self.token_len)) self.num_data = i fwrite.close()
'vec: get a vec representation of bow'
def vec(self):
self.__get_all_tokens() print 'before filter, the tokens len: {0}'.format(self.dictionary.__len__()) vectorizer = CountVectorizer(min_df=1e-05) transformer = TfidfTransformer() self.tfidf = transformer.fit_transform(vectorizer.fit_transform(self.corpus)) words = vectorizer.get_feature_names() print 'word len: {0}'.format(len(words)) print 'tfidf shape ({0},{1})'.format(self.tfidf.shape[0], self.tfidf.shape[1]) tfidf_vec_file = open(self.data_path.replace('all_title.csv', 'tfidf_vec.pl'), 'wb') pickle.dump(self.tfidf, tfidf_vec_file) tfidf_vec_file.close() tfidf_label_file = open(self.data_path.replace('all_title.csv', 'tfidf_label.pl'), 'wb') pickle.dump(self.labels, tfidf_label_file) tfidf_label_file.close()
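A small end-to-end sketch of the same CountVectorizer plus TfidfTransformer combination on a toy corpus of space-joined token strings (the documents are made up):

```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

# each document is already a space-joined token string,
# mirroring self.corpus.append(' '.join(text_tokens)) above
corpus = [
    'machine learning text classification',
    'deep learning text model',
    'classification model evaluation',
]

vectorizer = CountVectorizer(min_df=1)
counts = vectorizer.fit_transform(corpus)          # sparse term-count matrix
tfidf = TfidfTransformer().fit_transform(counts)   # reweight counts by tf-idf

print(tfidf.shape)   # (3, number_of_distinct_terms)
```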
'Load entries from a file.'
def loadFile(self, filename):
for line in codecs.open(filename, 'r', 'utf-8'): fields = line.split() label = fields[0] idx = int(fields[1]) self.add(label, idx)
'Write entries to a file.'
def writeFile(self, filename):
with codecs.open(filename, 'w', 'utf-8') as file: for i in range(self.size()): label = self.idxToLabel[i] file.write(('%s %d\n' % (label, i))) file.close()
'Find the id of each label in other dict.'
def align(self, other):
alignment = ([Constants.PAD] * self.size()) for (idx, label) in self.idxToLabel.items(): if (label in other.labelToIdx): alignment[idx] = other.labelToIdx[label] return alignment
'Mark this `label` and `idx` as special (i.e. will not be pruned).'
def addSpecial(self, label, idx=None):
idx = self.add(label, idx) self.special += [idx]
'Mark all labels in `labels` as specials (i.e. will not be pruned).'
def addSpecials(self, labels):
for label in labels: self.addSpecial(label)
'Add `label` in the dictionary. Use `idx` as its index if given.'
def add(self, label, idx=None):
label = (label.lower() if self.lower else label) if (idx is not None): self.idxToLabel[idx] = label self.labelToIdx[label] = idx elif (label in self.labelToIdx): idx = self.labelToIdx[label] else: idx = len(self.idxToLabel) self.idxToLabel[idx] = label self.labelToIdx[label] = idx if (idx not in self.frequencies): self.frequencies[idx] = 1 else: self.frequencies[idx] += 1 return idx
'Return a new dictionary with the `size` most frequent entries.'
def prune(self, size):
if (size >= self.size()): return self freq = [self.frequencies[i] for i in range(len(self.frequencies))] print freq[:100] idx = sorted(range(len(freq)), key=(lambda k: freq[k]), reverse=True) print idx[:100] newDict = Dict() newDict.lower = self.lower for i in self.special: newDict.addSpecial(self.idxToLabel[i]) for i in idx[:size]: newDict.add(self.idxToLabel[i]) return newDict
'Convert `labels` to indices. Use `unkWord` if not found. Optionally insert `bosWord` at the beginning and `eosWord` at the end.'
def convertToIdx(self, labels, unkWord, bosWord=None, eosWord=None):
vec = [] if (bosWord is not None): vec += [self.lookup(bosWord)] unk = self.lookup(unkWord) vec += [self.lookup(label, default=unk) for label in labels] if (eosWord is not None): vec += [self.lookup(eosWord)] return vec
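A stripped-down sketch of the lookup behaviour convertToIdx relies on, using a plain dict with illustrative special tokens rather than the actual Dict class:

```python
# illustrative vocabulary; the indices of the special tokens are arbitrary here
label_to_idx = {'<pad>': 0, '<unk>': 1, '<s>': 2, '</s>': 3, 'hello': 4, 'world': 5}

def convert_to_idx(labels, unk_word='<unk>', bos_word=None, eos_word=None):
    unk = label_to_idx[unk_word]
    vec = []
    if bos_word is not None:
        vec.append(label_to_idx[bos_word])
    vec.extend(label_to_idx.get(label, unk) for label in labels)
    if eos_word is not None:
        vec.append(label_to_idx[eos_word])
    return vec

print(convert_to_idx(['hello', 'unseen', 'world'], bos_word='<s>', eos_word='</s>'))
# [2, 4, 1, 5, 3] -- the unknown token falls back to <unk>
```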
'Convert `idx` to labels. If index `stop` is reached, convert it and return.'
def convertToLabels(self, idx, stop):
labels = [] for i in idx: labels += [self.getLabel(i)] if (i == stop): break return labels
'return a generator with the specified batch_size'
def read_copus_generator(self, batch_size=64):
logger.info('Beigin read copus {0}'.format(file_name)) data = [] index = 0 with open(file_name, 'r') as fread: while True: try: line = fread.readline() data.append(line) index += 1 if ((index % 100000) == 0): logger.info('The program has processed {0} lines '.format(index)) except: logger.info('Read End') break tokenizer = Tokenizer(nb_words=30000) tokenizer.fit_on_texts(data) logger.info('word num: {0}'.format(len(tokenizer.word_counts))) sorted_word_counts = sorted(tokenizer.word_counts.items(), key=operator.itemgetter(1), reverse=True) with open(file_name.replace('train.', 'meta.'), 'w') as fwrite: for word_cnt in sorted_word_counts: key = word_cnt[0] val = word_cnt[1] line = (((key + ':') + str(val)) + '\n') fwrite.write(line) vectorize_data = tokenizer.texts_to_matrix(data) return vectorize_data
'Reads through the analogy question file. Returns: questions: a [n, 4] numpy array containing the analogy question\'s word ids. questions_skipped: questions skipped due to unknown words.'
def read_analogies(self):
questions = [] questions_skipped = 0 with open(self._options.eval_data, 'rb') as analogy_f: for line in analogy_f: if line.startswith(':'): continue words = line.strip().lower().split(' ') ids = [self._word2id.get(w.strip()) for w in words] if ((None in ids) or (len(ids) != 4)): questions_skipped += 1 else: questions.append(np.array(ids)) print('Eval analogy file: ', self._options.eval_data) print('Questions: ', len(questions)) print('Skipped: ', questions_skipped) self._analogy_questions = np.array(questions, dtype=np.int32)
'Build the graph for the forward pass.'
def forward(self, examples, labels):
opts = self._options init_width = (0.5 / opts.emb_dim) emb = tf.Variable(tf.random_uniform([opts.vocab_size, opts.emb_dim], (- init_width), init_width), name='emb') self._emb = emb sm_w_t = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name='sm_w_t') sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name='sm_b') self.global_step = tf.Variable(0, name='global_step') labels_matrix = tf.reshape(tf.cast(labels, dtype=tf.int64), [opts.batch_size, 1]) (sampled_ids, _, _) = tf.nn.fixed_unigram_candidate_sampler(true_classes=labels_matrix, num_true=1, num_sampled=opts.num_samples, unique=True, range_max=opts.vocab_size, distortion=0.75, unigrams=opts.vocab_counts.tolist()) example_emb = tf.nn.embedding_lookup(emb, examples) true_w = tf.nn.embedding_lookup(sm_w_t, labels) true_b = tf.nn.embedding_lookup(sm_b, labels) sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids) sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids) true_logits = (tf.reduce_sum(tf.multiply(example_emb, true_w), 1) + true_b) sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples]) sampled_logits = (tf.matmul(example_emb, sampled_w, transpose_b=True) + sampled_b_vec) return (true_logits, sampled_logits)
'Build the graph for the NCE loss.'
def nce_loss(self, true_logits, sampled_logits):
opts = self._options true_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(true_logits), logits=true_logits) sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(sampled_logits), logits=sampled_logits) nce_loss_tensor = ((tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent)) / opts.batch_size) return nce_loss_tensor
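The NCE loss above is plain sigmoid cross-entropy with label 1 for the true pair and 0 for every sampled negative, summed and divided by the batch size. A numpy sketch of the same quantity with made-up logits:

```python
import numpy as np

def sigmoid_xent(label, logits):
    # numerically stable -label*log(sigmoid(x)) - (1-label)*log(1-sigmoid(x))
    return np.maximum(logits, 0) - logits * label + np.log1p(np.exp(-np.abs(logits)))

batch_size = 2
true_logits = np.array([2.0, 1.5])              # one true (target, context) pair per example
sampled_logits = np.array([[0.3, -0.2, 0.1],    # negatives sampled for example 0
                           [0.0, 0.4, -0.5]])   # negatives sampled for example 1

loss = (sigmoid_xent(1.0, true_logits).sum() +
        sigmoid_xent(0.0, sampled_logits).sum()) / batch_size
print(loss)
```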
'Build the graph to optimize the loss function.'
def optimize(self, loss):
opts = self._options words_to_train = float((opts.words_per_epoch * opts.epochs_to_train)) lr = (opts.learning_rate * tf.maximum(0.0001, (1.0 - (tf.cast(self._words, tf.float32) / words_to_train)))) self._lr = lr optimizer = tf.train.GradientDescentOptimizer(lr) train = optimizer.minimize(loss, global_step=self.global_step, gate_gradients=optimizer.GATE_NONE) self._train = train
'Build the eval graph.'
def build_eval_graph(self):
analogy_a = tf.placeholder(dtype=tf.int32) analogy_b = tf.placeholder(dtype=tf.int32) analogy_c = tf.placeholder(dtype=tf.int32) nemb = tf.nn.l2_normalize(self._emb, 1) a_emb = tf.gather(nemb, analogy_a) b_emb = tf.gather(nemb, analogy_b) c_emb = tf.gather(nemb, analogy_c) target = (c_emb + (b_emb - a_emb)) dist = tf.matmul(target, nemb, transpose_b=True) (_, pred_idx) = tf.nn.top_k(dist, 4) nearby_word = tf.placeholder(dtype=tf.int32) nearby_emb = tf.gather(nemb, nearby_word) nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True) (nearby_val, nearby_idx) = tf.nn.top_k(nearby_dist, min(1000, self._options.vocab_size)) self._analogy_a = analogy_a self._analogy_b = analogy_b self._analogy_c = analogy_c self._analogy_pred_idx = pred_idx self._nearby_word = nearby_word self._nearby_val = nearby_val self._nearby_idx = nearby_idx
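The analogy graph boils down to target = c + (b - a) on L2-normalized embeddings, followed by a dot-product ranking against the whole table. A numpy sketch with a random toy embedding table:

```python
import numpy as np

rng = np.random.RandomState(0)
emb = rng.randn(10, 8)                                   # toy table: 10 words, dim 8
nemb = emb / np.linalg.norm(emb, axis=1, keepdims=True)  # L2-normalize each row

a, b, c = 1, 2, 3                                        # word ids: a is to b as c is to ?
target = nemb[c] + (nemb[b] - nemb[a])
scores = nemb.dot(target)                                # similarity against every word
top4 = np.argsort(-scores)[:4]                           # the 4 best candidate ids
print(top4)
```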
'Build the graph for the full model.'
def build_graph(self):
opts = self._options (words, counts, words_per_epoch, self._epoch, self._words, examples, labels) = word2vec.skipgram_word2vec(filename=opts.train_data, batch_size=opts.batch_size, window_size=opts.window_size, min_count=opts.min_count, subsample=opts.subsample) (opts.vocab_words, opts.vocab_counts, opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch]) opts.vocab_size = len(opts.vocab_words) print('Data file: ', opts.train_data) print('Vocab size: ', (opts.vocab_size - 1), ' + UNK') print('Words per epoch: ', opts.words_per_epoch) self._examples = examples self._labels = labels self._id2word = opts.vocab_words for (i, w) in enumerate(self._id2word): self._word2id[w] = i (true_logits, sampled_logits) = self.forward(examples, labels) loss = self.nce_loss(true_logits, sampled_logits) tf.summary.scalar('NCE loss', loss) self._loss = loss self.optimize(loss) tf.global_variables_initializer().run() self.saver = tf.train.Saver()
'Save the vocabulary to a file so the model can be reloaded.'
def save_vocab(self):
opts = self._options with open(os.path.join(opts.save_path, 'vocab.txt'), 'w') as f: for i in xrange(opts.vocab_size): vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode('utf-8') f.write(('%s %d\n' % (vocab_word, opts.vocab_counts[i])))
'Train the model.'
def train(self):
opts = self._options (initial_epoch, initial_words) = self._session.run([self._epoch, self._words]) summary_op = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph) workers = [] for _ in xrange(opts.concurrent_steps): t = threading.Thread(target=self._train_thread_body) t.start() workers.append(t) (last_words, last_time, last_summary_time) = (initial_words, time.time(), 0) last_checkpoint_time = 0 while True: time.sleep(opts.statistics_interval) (epoch, step, loss, words, lr) = self._session.run([self._epoch, self.global_step, self._loss, self._words, self._lr]) now = time.time() (last_words, last_time, rate) = (words, now, ((words - last_words) / (now - last_time))) print(('Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r' % (epoch, step, lr, loss, rate)), end='') sys.stdout.flush() if ((now - last_summary_time) > opts.summary_interval): summary_str = self._session.run(summary_op) summary_writer.add_summary(summary_str, step) last_summary_time = now if ((now - last_checkpoint_time) > opts.checkpoint_interval): self.saver.save(self._session, os.path.join(opts.save_path, 'model.ckpt'), global_step=step.astype(int)) last_checkpoint_time = now if (epoch != initial_epoch): break for t in workers: t.join() return epoch
'Predict the top 4 answers for analogy questions.'
def _predict(self, analogy):
(idx,) = self._session.run([self._analogy_pred_idx], {self._analogy_a: analogy[:, 0], self._analogy_b: analogy[:, 1], self._analogy_c: analogy[:, 2]}) return idx
'Evaluate analogy questions and report accuracy.'
def eval(self):
correct = 0 try: total = self._analogy_questions.shape[0] except AttributeError as e: raise AttributeError('Need to read analogy questions.') start = 0 while (start < total): limit = (start + 2500) sub = self._analogy_questions[start:limit, :] idx = self._predict(sub) start = limit for question in xrange(sub.shape[0]): for j in xrange(4): if (idx[(question, j)] == sub[(question, 3)]): correct += 1 break elif (idx[(question, j)] in sub[question, :3]): continue else: break print() print(('Eval %4d/%d accuracy = %4.1f%%' % (correct, total, ((correct * 100.0) / total))))
'Predict word w3 as in w0:w1 vs w2:w3.'
def analogy(self, w0, w1, w2):
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]]) idx = self._predict(wid) for c in [self._id2word[i] for i in idx[0, :]]: if (c not in [w0, w1, w2]): print(c) break print('unknown')
'Prints out nearby words given a list of words.'
def nearby(self, words, num=20):
ids = np.array([self._word2id.get(x, 0) for x in words]) (vals, idx) = self._session.run([self._nearby_val, self._nearby_idx], {self._nearby_word: ids}) for i in xrange(len(words)): print(('\n%s\n=====================================' % words[i])) for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]): print(('%-20s %6.4f' % (self._id2word[neighbor], distance)))
'Convert the str_label to a 10-digit binary code, e.g. 385 to 0001010010'
def get_label(self, str_label):
result = ([0] * 10) for i in str_label: result[int(i)] = 1 return result
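A quick check of the multi-hot encoding described in the docstring: each digit in the label string switches on the corresponding position of a length-10 vector.

```python
def get_label(str_label):
    result = [0] * 10
    for digit in str_label:
        result[int(digit)] = 1
    return result

print(get_label('385'))
# [0, 0, 0, 1, 0, 1, 0, 0, 1, 0]  -> positions 3, 5 and 8 are set
```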
'Process a list of strings, each corresponding to the recorded changes. Args: text: A list of lines of text (assumed to contain newlines) Returns: A tuple of the modified text and a textual description of what is done. Raises: ValueError: if substitution source location does not have expected text.'
def process(self, text):
change_report = '' for (line, edits) in self._line_to_edit.items(): offset = 0 edits.sort(key=(lambda x: x.start)) char_array = list(text[(line - 1)]) change_report += ('%r Line %d\n' % (self._filename, line)) change_report += (('-' * 80) + '\n\n') for e in edits: change_report += ('%s\n' % e.comment) change_report += ('\n Old: %s' % text[(line - 1)]) change_list = ([' '] * len(text[(line - 1)])) change_list_new = ([' '] * len(text[(line - 1)])) for e in edits: start_eff = (e.start + offset) end_eff = (start_eff + len(e.old)) old_actual = ''.join(char_array[start_eff:end_eff]) if (old_actual != e.old): raise ValueError(('Expected text %r but got %r' % (''.join(e.old), ''.join(old_actual)))) char_array[start_eff:end_eff] = list(e.new) change_list[e.start:(e.start + len(e.old))] = ('~' * len(e.old)) change_list_new[start_eff:end_eff] = ('~' * len(e.new)) offset += (len(e.new) - len(e.old)) change_report += (' %s\n' % ''.join(change_list)) text[(line - 1)] = ''.join(char_array) change_report += (' New: %s' % text[(line - 1)]) change_report += (' %s\n\n' % ''.join(change_list_new)) return (''.join(text), change_report, self._errors)
'Add a new change that is needed. Args: comment: A description of what was changed line: Line number (1 indexed) start: Column offset (0 indexed) old: old text new: new text error: this "edit" is something that cannot be fixed automatically Returns: None'
def add(self, comment, line, start, old, new, error=None):
self._line_to_edit[line].append(FileEditTuple(comment, line, start, old, new)) if error: self._errors.append(('%s:%d: %s' % (self._filename, line, error)))
'Traverse an attribute to generate a full name e.g. tf.foo.bar. Args: node: A Node of type Attribute. Returns: a \'.\'-delimited full-name or None if the tree was not a simple form. i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".'
def _get_attribute_full_path(self, node):
curr = node items = [] while (not isinstance(curr, ast.Name)): if (not isinstance(curr, ast.Attribute)): return None items.append(curr.attr) curr = curr.value items.append(curr.id) return '.'.join(reversed(items))
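A self-contained sketch of the same attribute-walking idea applied to a parsed expression, using only the standard `ast` module (the sample source string is arbitrary):

```python
import ast

def attribute_full_path(node):
    """Return 'a.b.c' for a chain of ast.Attribute nodes, or None otherwise."""
    items = []
    curr = node
    while not isinstance(curr, ast.Name):
        if not isinstance(curr, ast.Attribute):
            return None
        items.append(curr.attr)
        curr = curr.value
    items.append(curr.id)
    return '.'.join(reversed(items))

expr = ast.parse('tf.nn.softmax', mode='eval').body
print(attribute_full_path(expr))   # tf.nn.softmax
```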
'Return correct line number and column offset for a given node. This is necessary mainly because ListComp\'s location reporting reports the next token after the list comprehension list opening. Args: node: Node for which we wish to know the lineno and col_offset'
def _find_true_position(self, node):
import re find_open = re.compile('^\\s*(\\[).*$') find_string_chars = re.compile('[\'"]') if isinstance(node, ast.ListComp): line = node.lineno col = node.col_offset while 1: text = self._lines[(line - 1)] reversed_preceding_text = text[:col][::(-1)] m = find_open.match(reversed_preceding_text) if m: new_col_offset = ((col - m.start(1)) - 1) return (line, new_col_offset) elif ((reversed_preceding_text == '') or reversed_preceding_text.isspace()): line = (line - 1) prev_line = self._lines[(line - 1)] comment_start = prev_line.find('#') if (comment_start == (-1)): col = (len(prev_line) - 1) elif (find_string_chars.search(prev_line[comment_start:]) is None): col = comment_start else: return (None, None) else: return (None, None) return (node.lineno, node.col_offset)
'Handle visiting a call node in the AST. Args: node: Current Node'
def visit_Call(self, node):
full_name = self._get_attribute_full_path(node.func) node.func.is_function_for_call = True if (full_name and full_name.startswith('tf.')): function_handles = self._api_change_spec.function_handle if (full_name in function_handles): function_handles[full_name](self._file_edit, node) function_reorders = self._api_change_spec.function_reorders function_keyword_renames = self._api_change_spec.function_keyword_renames if (full_name in function_reorders): reordered = function_reorders[full_name] for (idx, arg) in enumerate(node.args): (lineno, col_offset) = self._find_true_position(arg) if ((lineno is None) or (col_offset is None)): self._file_edit.add(('Failed to add keyword %r to reordered function %r' % (reordered[idx], full_name)), arg.lineno, arg.col_offset, '', '', error='A necessary keyword argument failed to be inserted.') else: keyword_arg = reordered[idx] if ((full_name in function_keyword_renames) and (keyword_arg in function_keyword_renames[full_name])): keyword_arg = function_keyword_renames[full_name][keyword_arg] self._file_edit.add(('Added keyword %r to reordered function %r' % (reordered[idx], full_name)), lineno, col_offset, '', (keyword_arg + '=')) renamed_keywords = ({} if (full_name not in function_keyword_renames) else function_keyword_renames[full_name]) for keyword in node.keywords: argkey = keyword.arg argval = keyword.value if (argkey in renamed_keywords): (argval_lineno, argval_col_offset) = self._find_true_position(argval) if ((argval_lineno is not None) and (argval_col_offset is not None)): key_start = ((argval_col_offset - len(argkey)) - 1) key_end = ((key_start + len(argkey)) + 1) if (self._lines[(argval_lineno - 1)][key_start:key_end] == (argkey + '=')): self._file_edit.add(('Renamed keyword argument from %r to %r' % (argkey, renamed_keywords[argkey])), argval_lineno, ((argval_col_offset - len(argkey)) - 1), (argkey + '='), (renamed_keywords[argkey] + '=')) continue self._file_edit.add(('Failed to rename keyword argument from %r to %r' % (argkey, renamed_keywords[argkey])), argval.lineno, ((argval.col_offset - len(argkey)) - 1), '', '', error='Failed to find keyword lexographically. Fix manually.') ast.NodeVisitor.generic_visit(self, node)
'Handle bare Attributes i.e. [tf.foo, tf.bar]. Args: node: Node that is of type ast.Attribute'
def visit_Attribute(self, node):
full_name = self._get_attribute_full_path(node) if (full_name and full_name.startswith('tf.')): self._rename_functions(node, full_name) if (full_name in self._api_change_spec.change_to_function): if (not hasattr(node, 'is_function_for_call')): new_text = (full_name + '()') self._file_edit.add(('Changed %r to %r' % (full_name, new_text)), node.lineno, node.col_offset, full_name, new_text) ast.NodeVisitor.generic_visit(self, node)
'Process the given python file for incompatible changes. Args: in_filename: filename to parse out_filename: output file to write to Returns: A tuple representing number of files processed, log of actions, errors'
def process_file(self, in_filename, out_filename):
with open(in_filename, 'r') as in_file: with tempfile.NamedTemporaryFile('w', delete=False) as temp_file: ret = self.process_opened_file(in_filename, in_file, out_filename, temp_file) shutil.move(temp_file.name, out_filename) return ret
'Process the given python file for incompatible changes. This function is split out to facilitate StringIO testing from tf_upgrade_test.py. Args: in_filename: filename to parse in_file: opened file (or StringIO) out_filename: output file to write to out_file: opened file (or StringIO) Returns: A tuple representing number of files processed, log of actions, errors'
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
process_errors = [] text = (('-' * 80) + '\n') text += ('Processing file %r\n outputting to %r\n' % (in_filename, out_filename)) text += (('-' * 80) + '\n\n') parsed_ast = None lines = in_file.readlines() try: parsed_ast = ast.parse(''.join(lines)) except Exception: text += ('Failed to parse %r\n\n' % in_filename) text += traceback.format_exc() if parsed_ast: visitor = TensorFlowCallVisitor(in_filename, lines) visitor.visit(parsed_ast) (out_text, new_text, process_errors) = visitor.process(lines) text += new_text if out_file: out_file.write(out_text) text += '\n' return (1, text, process_errors)
'Processes upgrades on an entire tree of python files in place. Note that only Python files are processed; if you have custom code in other languages, you will need to manually upgrade those. Args: root_directory: Directory to walk and process. output_root_directory: Directory to use as base Returns: A tuple of files processed, the report string for all files, and errors'
def process_tree(self, root_directory, output_root_directory):
if (output_root_directory and os.path.exists(output_root_directory)): print(('Output directory %r must not already exist.' % output_root_directory)) sys.exit(1) norm_root = os.path.split(os.path.normpath(root_directory)) norm_output = os.path.split(os.path.normpath(output_root_directory)) if (norm_root == norm_output): print(('Output directory %r same as input directory %r' % (root_directory, output_root_directory))) sys.exit(1) files_to_process = [] for (dir_name, _, file_list) in os.walk(root_directory): py_files = [f for f in file_list if f.endswith('.py')] for filename in py_files: fullpath = os.path.join(dir_name, filename) fullpath_output = os.path.join(output_root_directory, os.path.relpath(fullpath, root_directory)) files_to_process.append((fullpath, fullpath_output)) file_count = 0 tree_errors = [] report = '' report += (('=' * 80) + '\n') report += ('Input tree: %r\n' % root_directory) report += (('=' * 80) + '\n') for (input_path, output_path) in files_to_process: output_directory = os.path.dirname(output_path) if (not os.path.isdir(output_directory)): os.makedirs(output_directory) file_count += 1 (_, l_report, l_errors) = self.process_file(input_path, output_path) tree_errors += l_errors report += l_report return (file_count, report, tree_errors)
'Run predict op for each request. Args: request: The TensorProto which contains the map of "inputs". The request.inputs looks like {\'features\': dtype: DT_FLOAT tensor_shape { dim { size: 2 } } tensor_content: "<raw float bytes>" }. context: The grpc.beta._server_adaptations._FaceServicerContext object. Returns: The TensorProto which contains the map of "outputs". The response.outputs looks like {\'softmax\': dtype: DT_FLOAT tensor_shape { dim { size: 2 } } tensor_content: "<raw float bytes>" }'
def Predict(self, request, context):
request_map = request.inputs feed_dict = {} for (k, v) in self.inputs.items(): feed_dict[v] = tensor_util.MakeNdarray(request_map[k]) predict_result = self.sess.run(self.outputs, feed_dict=feed_dict) response = predict_pb2.PredictResponse() for (k, v) in predict_result.items(): response.outputs[k].CopyFrom(tensor_util.make_tensor_proto(v)) return response
'Constructor. Args: channel: A grpc.Channel.'
def __init__(self, channel):
self.Predict = channel.unary_unary('/tensorflow.serving.PredictionService/Predict', request_serializer=PredictRequest.SerializeToString, response_deserializer=PredictResponse.FromString)
'Overloads `+` operator. It does NOT overwrite the existing item. For example, ```python import sugartensor as tf opt = tf.sg_opt(size=1) opt += tf.sg_opt(size=2) print(opt) # Should be {\'size\': 1}'
def __add__(self, other):
res = Opt(self.__dict__) for (k, v) in six.iteritems(other): if ((k not in res.__dict__) or (res.__dict__[k] is None)): res.__dict__[k] = v return res
'Overloads `*` operator. It overwrites the existing item. For example, ```python import sugartensor as tf opt = tf.sg_opt(size=1) opt *= tf.sg_opt(size=2) print(opt) # Should be {\'size\': 2}'
def __mul__(self, other):
res = Opt(self.__dict__) for (k, v) in six.iteritems(other): res.__dict__[k] = v return res
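A tiny self-contained sketch of the two operator semantics documented above, without depending on sugartensor; the class name and values are illustrative only:

```python
class Opt(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __add__(self, other):
        # `+` keeps existing (non-None) items and only fills in missing ones
        res = Opt(**self.__dict__)
        for k, v in other.__dict__.items():
            if k not in res.__dict__ or res.__dict__[k] is None:
                res.__dict__[k] = v
        return res

    def __mul__(self, other):
        # `*` overwrites existing items unconditionally
        res = Opt(**self.__dict__)
        res.__dict__.update(other.__dict__)
        return res

opt = Opt(size=1)
print((opt + Opt(size=2)).size)   # 1 -- `+` does not overwrite
print((opt * Opt(size=2)).size)   # 2 -- `*` does overwrite
```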
'all_boxes is a list of length number-of-classes. Each list element is a list of length number-of-images. Each of those list elements is either an empty list [] or a numpy array of detections. all_boxes[class][image] = [] or np.array of shape #dets x 5'
def evaluate_detections(self, all_boxes, output_dir=None):
raise NotImplementedError
'Evaluate detection proposal recall metrics. Returns: results: dictionary of results with keys \'ar\': average recall \'recalls\': vector recalls at each IoU overlap threshold \'thresholds\': vector of IoU overlap thresholds \'gt_overlaps\': vector of all ground-truth overlaps'
def evaluate_recall(self, candidate_boxes=None, thresholds=None, area='all', limit=None):
areas = {'all': 0, 'small': 1, 'medium': 2, 'large': 3, '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7} area_ranges = [[(0 ** 2), (100000.0 ** 2)], [(0 ** 2), (32 ** 2)], [(32 ** 2), (96 ** 2)], [(96 ** 2), (100000.0 ** 2)], [(96 ** 2), (128 ** 2)], [(128 ** 2), (256 ** 2)], [(256 ** 2), (512 ** 2)], [(512 ** 2), (100000.0 ** 2)]] assert (area in areas), 'unknown area range: {}'.format(area) area_range = area_ranges[areas[area]] gt_overlaps = np.zeros(0) num_pos = 0 for i in range(self.num_images): max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1) gt_inds = np.where(((self.roidb[i]['gt_classes'] > 0) & (max_gt_overlaps == 1)))[0] gt_boxes = self.roidb[i]['boxes'][gt_inds, :] gt_areas = self.roidb[i]['seg_areas'][gt_inds] valid_gt_inds = np.where(((gt_areas >= area_range[0]) & (gt_areas <= area_range[1])))[0] gt_boxes = gt_boxes[valid_gt_inds, :] num_pos += len(valid_gt_inds) if (candidate_boxes is None): non_gt_inds = np.where((self.roidb[i]['gt_classes'] == 0))[0] boxes = self.roidb[i]['boxes'][non_gt_inds, :] else: boxes = candidate_boxes[i] if (boxes.shape[0] == 0): continue if ((limit is not None) and (boxes.shape[0] > limit)): boxes = boxes[:limit, :] overlaps = bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float)) _gt_overlaps = np.zeros(gt_boxes.shape[0]) for j in range(gt_boxes.shape[0]): argmax_overlaps = overlaps.argmax(axis=0) max_overlaps = overlaps.max(axis=0) gt_ind = max_overlaps.argmax() gt_ovr = max_overlaps.max() assert (gt_ovr >= 0) box_ind = argmax_overlaps[gt_ind] _gt_overlaps[j] = overlaps[(box_ind, gt_ind)] assert (_gt_overlaps[j] == gt_ovr) overlaps[box_ind, :] = (-1) overlaps[:, gt_ind] = (-1) gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps)) gt_overlaps = np.sort(gt_overlaps) if (thresholds is None): step = 0.05 thresholds = np.arange(0.5, (0.95 + 1e-05), step) recalls = np.zeros_like(thresholds) for (i, t) in enumerate(thresholds): recalls[i] = ((gt_overlaps >= t).sum() / float(num_pos)) ar = recalls.mean() return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds, 'gt_overlaps': gt_overlaps}
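The recall computation hinges on bbox_overlaps, the pairwise IoU between proposal and ground-truth boxes. A plain numpy sketch of that helper, using the same inclusive (x1, y1, x2, y2) pixel convention (hence the + 1 on widths and heights); the two boxes are made up:

```python
import numpy as np

def bbox_overlaps(boxes, gt_boxes):
    """Pairwise IoU between boxes (N, 4) and gt_boxes (K, 4), inclusive coordinates."""
    N, K = boxes.shape[0], gt_boxes.shape[0]
    overlaps = np.zeros((N, K), dtype=np.float64)
    for n in range(N):
        area_n = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
        for k in range(K):
            iw = min(boxes[n, 2], gt_boxes[k, 2]) - max(boxes[n, 0], gt_boxes[k, 0]) + 1
            ih = min(boxes[n, 3], gt_boxes[k, 3]) - max(boxes[n, 1], gt_boxes[k, 1]) + 1
            if iw <= 0 or ih <= 0:
                continue
            area_k = (gt_boxes[k, 2] - gt_boxes[k, 0] + 1) * (gt_boxes[k, 3] - gt_boxes[k, 1] + 1)
            overlaps[n, k] = (iw * ih) / float(area_n + area_k - iw * ih)
    return overlaps

print(bbox_overlaps(np.array([[0, 0, 9, 9]]), np.array([[5, 5, 14, 14]])))
# [[0.14285714]]  (25 overlap pixels / 175 union pixels)
```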
'Turn competition mode on or off.'
def competition_mode(self, on):
pass
'Return the absolute path to image i in the image sequence.'
def image_path_at(self, i):
return self.image_path_from_index(self._image_index[i])
'Construct an image path from the image\'s "index" identifier.'
def image_path_from_index(self, index):
image_path = os.path.join(self._data_path, 'JPEGImages', (index + self._image_ext)) assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path) return image_path
'Load the indexes listed in this dataset\'s image set file.'
def _load_image_set_index(self):
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', (self._image_set + '.txt')) assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file) with open(image_set_file) as f: image_index = [x.strip() for x in f.readlines()] return image_index
'Return the default path where PASCAL VOC is expected to be installed.'
def _get_default_path(self):
return os.path.join(cfg.DATA_DIR, ('VOCdevkit' + self._year))
'Return the database of ground-truth regions of interest. This function loads/saves from/to a cache file to speed up future calls.'
def gt_roidb(self):
cache_file = os.path.join(self.cache_path, (self.name + '_gt_roidb.pkl')) if os.path.exists(cache_file): with open(cache_file, 'rb') as fid: try: roidb = pickle.load(fid) except: roidb = pickle.load(fid, encoding='bytes') print('{} gt roidb loaded from {}'.format(self.name, cache_file)) return roidb gt_roidb = [self._load_pascal_annotation(index) for index in self.image_index] with open(cache_file, 'wb') as fid: pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL) print('wrote gt roidb to {}'.format(cache_file)) return gt_roidb
'Load image and bounding boxes info from XML file in the PASCAL VOC format.'
def _load_pascal_annotation(self, index):
filename = os.path.join(self._data_path, 'Annotations', (index + '.xml')) tree = ET.parse(filename) objs = tree.findall('object') if (not self.config['use_diff']): non_diff_objs = [obj for obj in objs if (int(obj.find('difficult').text) == 0)] objs = non_diff_objs num_objs = len(objs) boxes = np.zeros((num_objs, 4), dtype=np.uint16) gt_classes = np.zeros(num_objs, dtype=np.int32) overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32) seg_areas = np.zeros(num_objs, dtype=np.float32) for (ix, obj) in enumerate(objs): bbox = obj.find('bndbox') x1 = (float(bbox.find('xmin').text) - 1) y1 = (float(bbox.find('ymin').text) - 1) x2 = (float(bbox.find('xmax').text) - 1) y2 = (float(bbox.find('ymax').text) - 1) cls = self._class_to_ind[obj.find('name').text.lower().strip()] boxes[ix, :] = [x1, y1, x2, y2] gt_classes[ix] = cls overlaps[(ix, cls)] = 1.0 seg_areas[ix] = (((x2 - x1) + 1) * ((y2 - y1) + 1)) overlaps = scipy.sparse.csr_matrix(overlaps) return {'boxes': boxes, 'gt_classes': gt_classes, 'gt_overlaps': overlaps, 'flipped': False, 'seg_areas': seg_areas}
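A minimal sketch of the XML parsing step on an in-memory annotation snippet, showing the same 1-pixel shift applied to VOC's 1-based coordinates (the snippet itself is fabricated):

```python
import xml.etree.ElementTree as ET

xml_snippet = """
<annotation>
  <object>
    <name>dog</name>
    <difficult>0</difficult>
    <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>
  </object>
</annotation>
"""

root = ET.fromstring(xml_snippet)
for obj in root.findall('object'):
    bbox = obj.find('bndbox')
    # VOC coordinates are 1-based; subtract 1 to make them 0-based
    x1 = float(bbox.find('xmin').text) - 1
    y1 = float(bbox.find('ymin').text) - 1
    x2 = float(bbox.find('xmax').text) - 1
    y2 = float(bbox.find('ymax').text) - 1
    print(obj.find('name').text, [x1, y1, x2, y2])   # dog [47.0, 239.0, 194.0, 370.0]
```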
'Load image ids.'
def _load_image_set_index(self):
image_ids = self._COCO.getImgIds() return image_ids
'Return the absolute path to image i in the image sequence.'
def image_path_at(self, i):
return self.image_path_from_index(self._image_index[i])
'Construct an image path from the image\'s "index" identifier.'
def image_path_from_index(self, index):
file_name = (((('COCO_' + self._data_name) + '_') + str(index).zfill(12)) + '.jpg') image_path = osp.join(self._data_path, 'images', self._data_name, file_name) assert osp.exists(image_path), 'Path does not exist: {}'.format(image_path) return image_path
'Return the database of ground-truth regions of interest. This function loads/saves from/to a cache file to speed up future calls.'
def gt_roidb(self):
cache_file = osp.join(self.cache_path, (self.name + '_gt_roidb.pkl')) if osp.exists(cache_file): with open(cache_file, 'rb') as fid: roidb = pickle.load(fid) print('{} gt roidb loaded from {}'.format(self.name, cache_file)) return roidb gt_roidb = [self._load_coco_annotation(index) for index in self._image_index] with open(cache_file, 'wb') as fid: pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL) print('wrote gt roidb to {}'.format(cache_file)) return gt_roidb
'Loads COCO bounding-box instance annotations. Crowd instances are handled by marking their overlaps (with all categories) to -1. This overlap value means that crowd "instances" are excluded from training.'
def _load_coco_annotation(self, index):
im_ann = self._COCO.loadImgs(index)[0] width = im_ann['width'] height = im_ann['height'] annIds = self._COCO.getAnnIds(imgIds=index, iscrowd=None) objs = self._COCO.loadAnns(annIds) valid_objs = [] for obj in objs: x1 = np.max((0, obj['bbox'][0])) y1 = np.max((0, obj['bbox'][1])) x2 = np.min(((width - 1), (x1 + np.max((0, (obj['bbox'][2] - 1)))))) y2 = np.min(((height - 1), (y1 + np.max((0, (obj['bbox'][3] - 1)))))) if ((obj['area'] > 0) and (x2 >= x1) and (y2 >= y1)): obj['clean_bbox'] = [x1, y1, x2, y2] valid_objs.append(obj) objs = valid_objs num_objs = len(objs) boxes = np.zeros((num_objs, 4), dtype=np.uint16) gt_classes = np.zeros(num_objs, dtype=np.int32) overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32) seg_areas = np.zeros(num_objs, dtype=np.float32) coco_cat_id_to_class_ind = dict([(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]]) for (ix, obj) in enumerate(objs): cls = coco_cat_id_to_class_ind[obj['category_id']] boxes[ix, :] = obj['clean_bbox'] gt_classes[ix] = cls seg_areas[ix] = obj['area'] if obj['iscrowd']: overlaps[ix, :] = (-1.0) else: overlaps[(ix, cls)] = 1.0 ds_utils.validate_boxes(boxes, width=width, height=height) overlaps = scipy.sparse.csr_matrix(overlaps) return {'width': width, 'height': height, 'boxes': boxes, 'gt_classes': gt_classes, 'gt_overlaps': overlaps, 'flipped': False, 'seg_areas': seg_areas}
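COCO stores boxes as [x, y, width, height]; the code above converts them to clipped [x1, y1, x2, y2] corners before keeping an object. A standalone sketch of that conversion with made-up numbers:

```python
import numpy as np

def coco_bbox_to_corners(bbox, width, height):
    """Convert a COCO [x, y, w, h] box to clipped [x1, y1, x2, y2] corners."""
    x1 = np.max((0, bbox[0]))
    y1 = np.max((0, bbox[1]))
    x2 = np.min((width - 1, x1 + np.max((0, bbox[2] - 1))))
    y2 = np.min((height - 1, y1 + np.max((0, bbox[3] - 1))))
    return [x1, y1, x2, y2]

print(coco_bbox_to_corners([10.0, 20.0, 30.0, 40.0], width=100, height=100))
# [10.0, 20.0, 39.0, 59.0]
```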
'Set the roidb to be used by this layer during training.'
def __init__(self, roidb, num_classes, random=False):
self._roidb = roidb self._num_classes = num_classes self._random = random self._shuffle_roidb_inds()
'Randomly permute the training roidb.'
def _shuffle_roidb_inds(self):
if self._random: st0 = np.random.get_state() millis = (int(round((time.time() * 1000))) % 4294967295) np.random.seed(millis) if cfg.TRAIN.ASPECT_GROUPING: widths = np.array([r['width'] for r in self._roidb]) heights = np.array([r['height'] for r in self._roidb]) horz = (widths >= heights) vert = np.logical_not(horz) horz_inds = np.where(horz)[0] vert_inds = np.where(vert)[0] inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds))) inds = np.reshape(inds, ((-1), 2)) row_perm = np.random.permutation(np.arange(inds.shape[0])) inds = np.reshape(inds[row_perm, :], ((-1),)) self._perm = inds else: self._perm = np.random.permutation(np.arange(len(self._roidb))) if self._random: np.random.set_state(st0) self._cur = 0
'Return the roidb indices for the next minibatch.'
def _get_next_minibatch_inds(self):
if ((self._cur + cfg.TRAIN.IMS_PER_BATCH) >= len(self._roidb)): self._shuffle_roidb_inds() db_inds = self._perm[self._cur:(self._cur + cfg.TRAIN.IMS_PER_BATCH)] self._cur += cfg.TRAIN.IMS_PER_BATCH return db_inds
'Return the blobs to be used for the next minibatch. If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a separate process and made available through self._blob_queue.'
def _get_next_minibatch(self):
db_inds = self._get_next_minibatch_inds() minibatch_db = [self._roidb[i] for i in db_inds] return get_minibatch(minibatch_db, self._num_classes)
'Get blobs and copy them into this layer\'s top blob vector.'
def forward(self):
blobs = self._get_next_minibatch() return blobs
'Retrieve unique object or create, if it doesn\'t exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or created object and ``created`` is a boolean specifying whether a new object was created. Raises :class:`~mongoengine.queryset.MultipleObjectsReturned` or `DocumentName.MultipleObjectsReturned` if multiple results are found. A new document will be created if the document doesn\'t exist; a dictionary of default values for the new document may be provided as a keyword argument called :attr:`defaults`. .. note:: This requires two separate operations and therefore a race condition exists. Because there are no transactions in mongoDB other approaches should be investigated, to ensure you don\'t accidentally duplicate data when using this method. This is now scheduled to be removed before 1.0 :param write_options: optional extra keyword arguments used if we have to create a new document. Passes any write_options onto :meth:`~mongoengine.Document.save` :param auto_save: if the object is to be saved automatically if not found. add to your documents: meta = {\'queryset_class\': ExtendedQuerySet}'
def get_or_create(self, write_options=None, auto_save=True, *q_objs, **query):
defaults = query.get('defaults', {}) if ('defaults' in query): del query['defaults'] try: doc = self.get(*q_objs, **query) return (doc, False) except self._document.DoesNotExist: query.update(defaults) doc = self._document(**query) if auto_save: doc.save(write_options=write_options) return (doc, True)
'method returns the path (url) of the main image'
def get_main_image_url(self, thumb=False, default=None, identifier='mainimage'):
if (not isinstance(identifier, (list, tuple))): identifier = [identifier] for item in identifier: try: if (not thumb): path = self.contents.get(identifier=item).content.path else: path = self.contents.get(identifier=item).content.thumb return url_for('quokka.core.media', filename=path) except Exception as e: logger.warning(('get_main_image_url:' + str(e))) return default
'method returns the path of the main image with http'
def get_main_image_http(self, thumb=False, default=None, identifier='mainimage'):
site_url = get_site_url() image_url = self.get_main_image_url(thumb=thumb, default=default, identifier=identifier) return u'{}{}'.format(site_url, image_url)
'Another implementation data = {"name": name, "rawvalue": value, "formatter": formatter} self.values.update(data, name=name) or self.values.create(**data)'
def add_value(self, name, value, formatter='text'):
custom_value = CustomValue(name=name, value=value, formatter=formatter) self.values.append(custom_value)
'return how many ancestors this node has based on slugs'
def get_ancestors_count(self):
return len(self.get_ancestors_slugs())
'return ancestors slugs including self as 1st item >>> channel = Channel(long_slug=\'articles/technology/programming\') >>> channel.get_ancestors_slugs() [\'articles/technology/programming\', \'articles/technology\', \'articles\']'
def get_ancestors_slugs(self):
channel_list = [] channel_slugs = self.long_slug.split('/') while channel_slugs: channel_list.append('/'.join(channel_slugs)) channel_slugs.pop() return channel_list
'return all ancestors includind self as 1st item'
def get_ancestors(self, **kwargs):
channel_list = self.get_ancestors_slugs() ancestors = self.__class__.objects(long_slug__in=channel_list, **kwargs).order_by('-long_slug') return ancestors
'return direct children 1 level depth'
def get_children(self, **kwargs):
return self.__class__.objects(parent=self, **kwargs).order_by('long_slug')
'return all descendants including self as 1st item'
def get_descendants(self, **kwargs):
return self.__class__.objects(__raw__={'mpath': {'$regex': '^{0}'.format(self.mpath)}}).order_by('long_slug')
'This method should be reviewed. The canonical URL is the preferred URL for a content when the content can be served by multiple URLs. In the case of channels this will never happen until we implement the channel alias feature'
def get_canonical_url(self, *args, **kwargs):
if self.is_homepage: return '/' return self.get_absolute_url()
'populate inheritance from parent channels'
def heritage(self):
parent = self.parent if ((not parent) or (not self.inherit_parent)): return self.content_filters = (self.content_filters or parent.content_filters) self.include_in_rss = (self.include_in_rss or parent.include_in_rss) self.show_in_menu = (self.show_in_menu or parent.show_in_menu) self.indexable = (self.indexable or parent.indexable) self.channel_type = (self.channel_type or parent.channel_type)
'As config reads data from the database on every app.config.get(key)/[key], this data is cached as a cached_property. The TTL is fixed at 5 minutes because we can\'t read it from config itself; find a way to set the config parameter in a file, maybe in a config_setting.ini. It takes 5 minutes for new values to be available. Make it possible to use Redis as a cache'
@cached_property_ttl(300) def all_setings_from_db(self):
try: return {item.name: item.value for item in m.config.Config.objects.get(group='settings').values} except Exception as e: logger.warning(('Error reading all settings from db: %s' % e)) return {}
'This method is not used, but is here for compatibility'
@staticmethod def get_url(field):
return field.data
'This method should be removed when Flask is >=0.11'
def make_config(self, instance_relative=False):
root_path = self.root_path if instance_relative: root_path = self.instance_path return self.config_class(root_path, self.default_config)
'Fixme: should include extra paths, fixed paths, config-based paths, static paths'
def get(self):
return render_template('sitemap.xml', contents=self.get_contents(), channels=self.get_channels())
'Set up fixtures for the class. This method runs once for the entire class. This test case does not insert or update any record in the database, so there is no problem running it only once for the class. This saves some time, instead of populating the test database each time a test is executed.'
@classmethod def setUpClass(cls):
admin = create_admin() app = create_app(config='quokka.test_settings', DEBUG=False, test=True, admin_instance=admin) with app.app_context(): db = list(app.extensions.get('mongoengine').keys())[0] db.connection.drop_database('quokka_test') from quokka.utils.populate import Populate Populate(db)() cls.app = app cls.db = db
'create_app must be implemented; it is mandatory for flask_testing test cases. It only returns the app created in the setUpClass method.'
def create_app(self):
return self.app
'Set the cookie on the Flask test client.'
def set_cookie(self, key, value='', *args, **kwargs):
server_name = (flask.current_app.config['SERVER_NAME'] or 'localhost') return self.client.set_cookie(server_name, key=key, value=value, *args, **kwargs)