Columns: desc (string, 3 to 26.7k chars), decl (string, 11 to 7.89k chars), bodies (string, 8 to 553k chars)
'Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary.'
def reset_weights(self):
    logger.info('resetting layer weights')
    self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
    for i in xrange(len(self.wv.vocab)):
        # construct a deterministic seed from the word plus the model seed
        self.wv.syn0[i] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
    if self.hs:
        self.syn1 = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
    if self.negative:
        self.syn1neg = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
    self.wv.syn0norm = None
    self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL)  # zeros suppress learning
'Create one \'random\' vector (but deterministic by seed_string)'
def seeded_vector(self, seed_string):
    once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)  # keep within 32 bits
    return (once.rand(self.vector_size) - 0.5) / self.vector_size
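A standalone sketch (not the gensim API) of the same idea: hashing the seed string gives a reproducible starting vector. Note that Python 3 randomizes the built-in string hash between runs unless PYTHONHASHSEED is fixed, so reproducibility only holds within one process when using the default hash.

import numpy as np

def demo_seeded_vector(seed_string, vector_size=100, hashfxn=hash):
    # same recipe as above: hash -> 32-bit seed -> small uniform vector centered on zero
    once = np.random.RandomState(hashfxn(seed_string) & 0xffffffff)
    return (once.rand(vector_size) - 0.5) / vector_size

v1 = demo_seeded_vector('apple_42')
v2 = demo_seeded_vector('apple_42')
assert np.allclose(v1, v2)  # identical for identical seed strings (within one process)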
'Merge the input-hidden weight matrix from the original C word2vec-tool format given, where it intersects with the current vocabulary. (No words are added to the existing vocabulary, but intersecting words adopt the file\'s weights, and non-intersecting words are left alone.) `binary` is a boolean indicating whether the data is in binary word2vec format. `lockf` is a lock-factor value to be set for any imported word-vectors; the default value of 0.0 prevents further updating of the vector during subsequent training. Use 1.0 to allow further training updates of merged vectors.'
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
    overlap_count = 0
    logger.info('loading projection weights from %s' % fname)
    with utils.smart_open(fname) as fin:
        header = utils.to_unicode(fin.readline(), encoding=encoding)
        vocab_size, vector_size = map(int, header.split())
        if vector_size != self.vector_size:
            raise ValueError('incompatible vector size %d in file %s' % (vector_size, fname))
        if binary:
            binary_len = dtype(REAL).itemsize * vector_size
            for line_no in xrange(vocab_size):
                # mixed text and binary: read text first, then binary
                word = []
                while True:
                    ch = fin.read(1)
                    if ch == ' ':
                        break
                    if ch != '\n':  # ignore newlines in front of words (some binary files have them)
                        word.append(ch)
                word = utils.to_unicode(''.join(word), encoding=encoding, errors=unicode_errors)
                weights = fromstring(fin.read(binary_len), dtype=REAL)
                if word in self.wv.vocab:
                    overlap_count += 1
                    self.wv.syn0[self.wv.vocab[word].index] = weights
                    self.syn0_lockf[self.wv.vocab[word].index] = lockf  # lock-factor: 0.0 stops further changes
        else:
            for line_no, line in enumerate(fin):
                parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(' ')
                if len(parts) != vector_size + 1:
                    raise ValueError('invalid vector on line %s (is this really the text format?)' % line_no)
                word, weights = parts[0], list(map(REAL, parts[1:]))
                if word in self.wv.vocab:
                    overlap_count += 1
                    self.wv.syn0[self.wv.vocab[word].index] = weights
                    self.syn0_lockf[self.wv.vocab[word].index] = lockf
    logger.info('merged %d vectors into %s matrix from %s' % (overlap_count, self.wv.syn0.shape, fname))
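A usage sketch under assumed names (`my_sentences` and the vectors file path are placeholders; parameter names follow the gensim version this code comes from): build a vocabulary on your own corpus, seed overlapping words from a pre-trained file, and optionally let them keep training.

from gensim.models import Word2Vec

model = Word2Vec(my_sentences, size=300, min_count=5)  # my_sentences: placeholder iterable of token lists
# lockf=1.0 lets the imported vectors keep updating during later training; the default 0.0 freezes them
model.intersect_word2vec_format('pretrained-vectors.bin', binary=True, lockf=1.0)
model.train(my_sentences, total_examples=model.corpus_count, epochs=model.iter)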
'Deprecated. Use self.wv.most_similar() instead. Refer to the documentation for `gensim.models.KeyedVectors.most_similar`'
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)
'Deprecated. Use self.wv.wmdistance() instead. Refer to the documentation for `gensim.models.KeyedVectors.wmdistance`'
def wmdistance(self, document1, document2):
return self.wv.wmdistance(document1, document2)
'Deprecated. Use self.wv.most_similar_cosmul() instead. Refer to the documentation for `gensim.models.KeyedVectors.most_similar_cosmul`'
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
return self.wv.most_similar_cosmul(positive, negative, topn)
'Deprecated. Use self.wv.similar_by_word() instead. Refer to the documentation for `gensim.models.KeyedVectors.similar_by_word`'
def similar_by_word(self, word, topn=10, restrict_vocab=None):
return self.wv.similar_by_word(word, topn, restrict_vocab)
'Deprecated. Use self.wv.similar_by_vector() instead. Refer to the documentation for `gensim.models.KeyedVectors.similar_by_vector`'
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
return self.wv.similar_by_vector(vector, topn, restrict_vocab)
'Deprecated. Use self.wv.doesnt_match() instead. Refer to the documentation for `gensim.models.KeyedVectors.doesnt_match`'
def doesnt_match(self, words):
return self.wv.doesnt_match(words)
'Deprecated. Use self.wv.__getitem__() instead. Refer to the documentation for `gensim.models.KeyedVectors.__getitem__`'
def __getitem__(self, words):
return self.wv.__getitem__(words)
'Deprecated. Use self.wv.__contains__() instead. Refer to the documentation for `gensim.models.KeyedVectors.__contains__`'
def __contains__(self, word):
return self.wv.__contains__(word)
'Deprecated. Use self.wv.similarity() instead. Refer to the documentation for `gensim.models.KeyedVectors.similarity`'
def similarity(self, w1, w2):
return self.wv.similarity(w1, w2)
'Deprecated. Use self.wv.n_similarity() instead. Refer to the documentation for `gensim.models.KeyedVectors.n_similarity`'
def n_similarity(self, ws1, ws2):
return self.wv.n_similarity(ws1, ws2)
'Report the probability distribution of the center word given the context words as input to the trained model.'
def predict_output_word(self, context_words_list, topn=10):
    if not self.negative:
        raise RuntimeError(
            'We have currently only implemented predict_output_word for the negative sampling scheme, '
            'so you need to have run word2vec with negative > 0 for this to work.'
        )
    if not hasattr(self.wv, 'syn0') or not hasattr(self, 'syn1neg'):
        raise RuntimeError('Parameters required for predicting the output words not found.')
    word_vocabs = [self.wv.vocab[w] for w in context_words_list if w in self.wv.vocab]
    if not word_vocabs:
        warnings.warn('All the input context words are out-of-vocabulary for the current model.')
        return None
    word2_indices = [word.index for word in word_vocabs]
    l1 = np_sum(self.wv.syn0[word2_indices], axis=0)
    if word2_indices and self.cbow_mean:
        l1 /= len(word2_indices)
    # propagate hidden -> output and take softmax to get probabilities
    prob_values = exp(dot(l1, self.syn1neg.T))
    prob_values /= sum(prob_values)
    top_indices = matutils.argsort(prob_values, topn=topn, reverse=True)
    # return the most probable output words with their probabilities
    return [(self.wv.index2word[index1], prob_values[index1]) for index1 in top_indices]
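A minimal sketch, assuming `my_sentences` is a placeholder corpus: the model must have been trained with negative sampling for predict_output_word to work.

from gensim.models import Word2Vec

model = Word2Vec(my_sentences, negative=5)  # negative > 0 is required
print(model.predict_output_word(['the', 'quick', 'brown'], topn=3))
# e.g. [('fox', 0.12), ('jumps', 0.08), ('dog', 0.05)]; actual words and probabilities depend on the corpus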
'init_sims() resides in KeyedVectors because it deals mainly with syn0; but because syn1 is not an attribute of KeyedVectors, it has to be deleted in this class. The normalization of syn0 itself happens inside KeyedVectors.'
def init_sims(self, replace=False):
    if replace and hasattr(self, 'syn1'):
        del self.syn1
    return self.wv.init_sims(replace)
'Estimate required memory for a model using current settings and provided vocabulary size.'
def estimate_memory(self, vocab_size=None, report=None):
    vocab_size = vocab_size or len(self.wv.vocab)
    report = report or {}
    report['vocab'] = vocab_size * (700 if self.hs else 500)
    report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
    if self.hs:
        report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
    if self.negative:
        report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
    report['total'] = sum(report.values())
    logger.info(
        'estimated required memory for %i words and %i dimensions: %i bytes',
        vocab_size, self.vector_size, report['total']
    )
    return report
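A quick sketch of using the estimate before committing to a large vocabulary; `model` is assumed to be an already-constructed Word2Vec instance and the vocabulary size is hypothetical.

report = model.estimate_memory(vocab_size=1000000)
print('estimated total: %.1f GB' % (report['total'] / 1024.0 ** 3))  # includes vocab, syn0 and syn1/syn1neg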
'Deprecated. Use self.wv.log_evaluate_word_pairs() instead. Refer to the documentation for `gensim.models.KeyedVectors.log_evaluate_word_pairs`'
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)
'Deprecated. Use self.wv.evaluate_word_pairs() instead. Refer to the documentation for `gensim.models.KeyedVectors.evaluate_word_pairs`'
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
'Discard parameters that are only used in training and scoring. Use this once you are sure you are done training a model. If `replace_word_vectors_with_normalized` is set, forget the original vectors and keep only the normalized ones, which saves a lot of memory.'
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
    if replace_word_vectors_with_normalized:
        self.init_sims(replace=True)
    self._minimize_model()
'Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.'
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict', limit=None, datatype=REAL):
raise DeprecationWarning('Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.')
'Deprecated. Use model.wv.save_word2vec_format instead.'
def save_word2vec_format(self, fname, fvocab=None, binary=False):
raise DeprecationWarning('Deprecated. Use model.wv.save_word2vec_format instead.')
'`source` can be either a string or a file object. Clip the file to the first `limit` lines (or read it in full if limit is None, the default). Example:: sentences = LineSentence(\'myfile.txt\') Or for compressed files:: sentences = LineSentence(\'compressed_text.txt.bz2\') sentences = LineSentence(\'compressed_text.txt.gz\')'
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
    self.source = source
    self.max_sentence_length = max_sentence_length
    self.limit = limit
'Iterate through the lines in the source.'
def __iter__(self):
    try:
        # assume it is a file-like object; things without seek() will raise AttributeError
        self.source.seek(0)
        for line in itertools.islice(self.source, self.limit):
            line = utils.to_unicode(line).split()
            i = 0
            while i < len(line):
                yield line[i:i + self.max_sentence_length]
                i += self.max_sentence_length
    except AttributeError:
        # fall back to treating the source as a filename
        with utils.smart_open(self.source) as fin:
            for line in itertools.islice(fin, self.limit):
                line = utils.to_unicode(line).split()
                i = 0
                while i < len(line):
                    yield line[i:i + self.max_sentence_length]
                    i += self.max_sentence_length
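A usage sketch: stream a one-sentence-per-line file (plain, .bz2 or .gz) straight into Word2Vec without loading it into RAM; 'myfile.txt' is a placeholder path.

from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence

sentences = LineSentence('myfile.txt')  # each line: one sentence, tokens separated by whitespace
model = Word2Vec(sentences, size=100, min_count=5)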
'`source` should be a path to a directory (as a string) where all files can be opened by the LineSentence class. Each file will be read up to `limit` lines (or in full if limit is None, the default). Example:: sentences = PathLineSentences(os.path.join(os.getcwd(), \'corpus\')) The files in the directory should be either text files, .bz2 files, or .gz files.'
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
    self.source = source
    self.max_sentence_length = max_sentence_length
    self.limit = limit
    if os.path.isfile(self.source):
        logging.warning('single file read, better to use models.word2vec.LineSentence')
        self.input_files = [self.source]  # force code compatibility with list of files
    elif os.path.isdir(self.source):
        self.source = os.path.join(self.source, '')  # ensure there is a trailing directory separator
        logging.debug('reading directory ' + self.source)
        self.input_files = os.listdir(self.source)
        self.input_files = [self.source + file for file in self.input_files]  # make full paths
        self.input_files.sort()  # process in filename order
    else:
        raise ValueError('input is neither a file nor a path')
    logging.info('files read into PathLineSentences:' + '\n'.join(self.input_files))
'Iterate through the files in the source directory.'
def __iter__(self):
    for file_name in self.input_files:
        logging.info('reading file ' + file_name)
        with utils.smart_open(file_name) as fin:
            for line in itertools.islice(fin, self.limit):
                line = utils.to_unicode(line).split()
                i = 0
                while i < len(line):
                    yield line[i:i + self.max_sentence_length]
                    i += self.max_sentence_length
'`corpus` is any iterable gensim corpus. `time_slice`, as described above, is a list containing the number of documents in each time-slice. `id2word` is a mapping from word ids (integers) to words (strings); it is used to determine the vocabulary size and for printing topics. `alphas` is a prior of your choice and should be a double or float value; the default is 0.01. `num_topics` is the number of requested latent topics to be extracted from the training corpus. `initialize` lets the user decide how to initialise the DTM model; the default is through gensim LDA. You can also use the sstats of a previously trained LDA model by specifying \'own\' and passing a np matrix through `sstats`; the shape of sstats is (vocab_len, num_topics). If you wish to pass a previously trained LDA model instead, pass it through `lda_model`. `chain_variance` is a constant which dictates how the beta values evolve; it is a gaussian parameter defined in the beta distribution. `passes` is the number of passes of the initial LdaModel. `random_state` can be a np.random.RandomState object, or the seed for one, for the LdaModel.'
def __init__(self, corpus=None, time_slice=None, id2word=None, alphas=0.01, num_topics=10, initialize='gensim', sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10, random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20, chunksize=100):
self.id2word = id2word if ((corpus is None) and (self.id2word is None)): raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality') if (self.id2word is None): logger.warning('no word id mapping provided; initializing from corpus, assuming identity') self.id2word = utils.dict_from_corpus(corpus) self.vocab_len = len(self.id2word) elif (len(self.id2word) > 0): self.vocab_len = len(self.id2word) else: self.vocab_len = 0 if (corpus is not None): try: self.corpus_len = len(corpus) except: logger.warning('input corpus stream has no len(); counting documents') self.corpus_len = sum((1 for _ in corpus)) self.time_slice = time_slice if (self.time_slice is not None): self.num_time_slices = len(time_slice) max_doc_len = 0 for (line_no, line) in enumerate(corpus): if (len(line) > max_doc_len): max_doc_len = len(line) self.max_doc_len = max_doc_len self.num_topics = num_topics self.num_time_slices = len(time_slice) self.alphas = np.full(num_topics, alphas) self.topic_chains = [] for topic in range(0, num_topics): sslm_ = sslm(num_time_slices=self.num_time_slices, vocab_len=self.vocab_len, num_topics=self.num_topics, chain_variance=chain_variance, obs_variance=obs_variance) self.topic_chains.append(sslm_) self.top_doc_phis = None self.influence = None self.renormalized_influence = None self.influence_sum_lgl = None if ((corpus is not None) and (time_slice is not None)): if (initialize == 'gensim'): lda_model = ldamodel.LdaModel(corpus, id2word=self.id2word, num_topics=self.num_topics, passes=passes, alpha=self.alphas, random_state=random_state) self.sstats = np.transpose(lda_model.state.sstats) if (initialize == 'ldamodel'): self.sstats = np.transpose(lda_model.state.sstats) if (initialize == 'own'): self.sstats = sstats self.init_ldaseq_ss(chain_variance, obs_variance, self.alphas, self.sstats) self.fit_lda_seq(corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize)
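A minimal sketch, with `my_corpus` and `my_dictionary` as placeholders: `time_slice` tells the model how many consecutive documents belong to each time period.

from gensim.models.ldaseqmodel import LdaSeqModel

ldaseq = LdaSeqModel(
    corpus=my_corpus, id2word=my_dictionary,
    time_slice=[200, 150, 175],  # 525 documents split over three periods, in corpus order
    num_topics=10, passes=10,
)
print(ldaseq.print_topics(time=0, top_terms=5))  # topics for the first time slice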
'Method to initialize State Space Language Model, topic wise.'
def init_ldaseq_ss(self, topic_chain_variance, topic_obs_variance, alpha, init_suffstats):
    self.alphas = alpha
    for k, chain in enumerate(self.topic_chains):
        sstats = init_suffstats[:, k]
        sslm.sslm_counts_init(chain, topic_obs_variance, topic_chain_variance, sstats)
'Fit an LDA sequence model: for each time period, set up an LDA model with E[log p(w|z)] and alpha; for each document, perform posterior inference and update the sufficient statistics/likelihood; finally, maximize the topics.'
def fit_lda_seq(self, corpus, lda_inference_max_iter, em_min_iter, em_max_iter, chunksize):
LDASQE_EM_THRESHOLD = 0.0001 LOWER_ITER = 10 ITER_MULT_LOW = 2 MAX_ITER = 500 num_topics = self.num_topics vocab_len = self.vocab_len data_len = self.num_time_slices corpus_len = self.corpus_len bound = 0 convergence = (LDASQE_EM_THRESHOLD + 1) iter_ = 0 while ((iter_ < em_min_iter) or ((convergence > LDASQE_EM_THRESHOLD) and (iter_ <= em_max_iter))): logger.info(' EM iter %i', iter_) logger.info('E Step') old_bound = bound topic_suffstats = [] for topic in range(0, num_topics): topic_suffstats.append(np.resize(np.zeros((vocab_len * data_len)), (vocab_len, data_len))) gammas = np.resize(np.zeros((corpus_len * num_topics)), (corpus_len, num_topics)) lhoods = np.resize(np.zeros(((corpus_len * num_topics) + 1)), (corpus_len, (num_topics + 1))) (bound, gammas) = self.lda_seq_infer(corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize) self.gammas = gammas logger.info('M Step') topic_bound = self.fit_lda_seq_topics(topic_suffstats) bound += topic_bound if ((bound - old_bound) < 0): if (lda_inference_max_iter < LOWER_ITER): lda_inference_max_iter *= ITER_MULT_LOW logger.info('Bound went down, increasing iterations to %i', lda_inference_max_iter) convergence = np.fabs(((bound - old_bound) / old_bound)) if (convergence < LDASQE_EM_THRESHOLD): lda_inference_max_iter = MAX_ITER logger.info('Starting final iterations, max iter is %i', lda_inference_max_iter) convergence = 1.0 logger.info('iteration %i iteration lda seq bound is %f convergence is %f', iter_, bound, convergence) iter_ += 1 return bound
'Inference or E- Step. This is used to set up the gensim LdaModel to be used for each time-slice. It also allows for Document Influence Model code to be written in.'
def lda_seq_infer(self, corpus, topic_suffstats, gammas, lhoods, iter_, lda_inference_max_iter, chunksize):
num_topics = self.num_topics vocab_len = self.vocab_len bound = 0.0 lda = ldamodel.LdaModel(num_topics=num_topics, alpha=self.alphas, id2word=self.id2word) lda.topics = np.array(np.split(np.zeros((vocab_len * num_topics)), vocab_len)) ldapost = LdaPost(max_doc_len=self.max_doc_len, num_topics=num_topics, lda=lda) model = 'DTM' if (model == 'DTM'): (bound, gammas) = self.inferDTMseq(corpus, topic_suffstats, gammas, lhoods, lda, ldapost, iter_, bound, lda_inference_max_iter, chunksize) elif (model == 'DIM'): self.InfluenceTotalFixed(corpus) (bound, gammas) = self.inferDIMseq(corpus, topic_suffstats, gammas, lhoods, lda, ldapost, iter_, bound, lda_inference_max_iter, chunksize) return (bound, gammas)
'Computes the likelihood of a sequential corpus under an LDA seq model, and return the likelihood bound. Need to pass the LdaSeq model, corpus, sufficient stats, gammas and lhoods matrices previously created, and LdaModel and LdaPost class objects.'
def inferDTMseq(self, corpus, topic_suffstats, gammas, lhoods, lda, ldapost, iter_, bound, lda_inference_max_iter, chunksize):
doc_index = 0 time = 0 doc_num = 0 num_topics = self.num_topics lda = self.make_lda_seq_slice(lda, time) time_slice = np.cumsum(np.array(self.time_slice)) for (chunk_no, chunk) in enumerate(utils.grouper(corpus, chunksize)): for doc in chunk: if (doc_index > time_slice[time]): time += 1 lda = self.make_lda_seq_slice(lda, time) doc_num = 0 gam = gammas[doc_index] lhood = lhoods[doc_index] ldapost.gamma = gam ldapost.lhood = lhood ldapost.doc = doc if (iter_ == 0): doc_lhood = LdaPost.fit_lda_post(ldapost, doc_num, time, None, lda_inference_max_iter=lda_inference_max_iter) else: doc_lhood = LdaPost.fit_lda_post(ldapost, doc_num, time, self, lda_inference_max_iter=lda_inference_max_iter) if (topic_suffstats is not None): topic_suffstats = LdaPost.update_lda_seq_ss(ldapost, time, doc, topic_suffstats) gammas[doc_index] = ldapost.gamma bound += doc_lhood doc_index += 1 doc_num += 1 return (bound, gammas)
'set up the LDA model topic-word values with that of ldaseq.'
def make_lda_seq_slice(self, lda, time):
    for k in range(0, self.num_topics):
        lda.topics[:, k] = np.copy(self.topic_chains[k].e_log_prob[:, time])
    lda.alpha = np.copy(self.alphas)
    return lda
'Fit lda sequence topic wise.'
def fit_lda_seq_topics(self, topic_suffstats):
    lhood = 0
    lhood_term = 0
    for k, chain in enumerate(self.topic_chains):
        logger.info('Fitting topic number %i', k)
        lhood_term = sslm.fit_sslm(chain, topic_suffstats[k])
        lhood += lhood_term
    return lhood
'Prints one topic showing each time-slice.'
def print_topic_times(self, topic, top_terms=20):
    topics = []
    for time in range(0, self.num_time_slices):
        topics.append(self.print_topic(topic, time, top_terms))
    return topics
'Prints all topics in a particular time-slice.'
def print_topics(self, time=0, top_terms=20):
    topics = []
    for topic in range(0, self.num_topics):
        topics.append(self.print_topic(topic, time, top_terms))
    return topics
'`topic` is the topic number, `time` selects a particular time_slice, and `top_terms` is the number of terms to display.'
def print_topic(self, topic, time=0, top_terms=20):
    topic = self.topic_chains[topic].e_log_prob
    topic = np.transpose(topic)
    topic = np.exp(topic[time])
    topic = topic / topic.sum()
    bestn = matutils.argsort(topic, top_terms, reverse=True)
    beststr = [(self.id2word[id_], round(topic[id_], 3)) for id_ in bestn]
    return beststr
'Given the `doc_number` of a document in the corpus the LdaSeqModel was trained on, return the doc-topic probabilities of that document.'
def doc_topics(self, doc_number):
    doc_topic = np.copy(self.gammas)
    doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
    return doc_topic[doc_number]
'Returns term_frequency, vocab, doc_lengths, topic-term distributions and doc-topic distributions, in the format expected by pyLDAvis. All of these are needed to visualise the topics of a DTM for a particular time-slice via pyLDAvis. The input parameter is the time-slice to visualise.'
def dtm_vis(self, time, corpus):
    doc_topic = np.copy(self.gammas)
    doc_topic /= doc_topic.sum(axis=1)[:, np.newaxis]
    topic_term = [
        np.exp(np.transpose(chain.e_log_prob)[time]) / np.exp(np.transpose(chain.e_log_prob)[time]).sum()
        for k, chain in enumerate(self.topic_chains)
    ]
    doc_lengths = [len(doc) for doc_no, doc in enumerate(corpus)]
    term_frequency = np.zeros(self.vocab_len)
    for doc_no, doc in enumerate(corpus):
        for pair in doc:
            term_frequency[pair[0]] += pair[1]
    vocab = [self.id2word[i] for i in range(0, len(self.id2word))]
    return doc_topic, np.array(topic_term), doc_lengths, term_frequency, vocab
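A sketch of feeding dtm_vis output into pyLDAvis (an external package, assumed to be installed); `ldaseq` and `my_corpus` are placeholders.

import pyLDAvis

doc_topic, topic_term, doc_lengths, term_frequency, vocab = ldaseq.dtm_vis(time=0, corpus=my_corpus)
vis = pyLDAvis.prepare(topic_term_dists=topic_term, doc_topic_dists=doc_topic,
                       doc_lengths=doc_lengths, vocab=vocab, term_frequency=term_frequency)
pyLDAvis.save_html(vis, 'dtm_slice0.html')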
'Returns all topics of a particular time-slice, without probability values, to be used for either "u_mass" or "c_v" coherence.'
def dtm_coherence(self, time):
    coherence_topics = []
    for topics in self.print_topics(time):
        coherence_topic = []
        for word, dist in topics:
            coherence_topic.append(word)
        coherence_topics.append(coherence_topic)
    return coherence_topics
'Similar to the LdaModel __getitem__ function, it returns topic proportions of a document passed.'
def __getitem__(self, doc):
    lda_model = ldamodel.LdaModel(num_topics=self.num_topics, alpha=self.alphas, id2word=self.id2word)
    lda_model.topics = np.array(np.split(np.zeros(self.vocab_len * self.num_topics), self.vocab_len))
    ldapost = LdaPost(num_topics=self.num_topics, max_doc_len=len(doc), lda=lda_model, doc=doc)
    time_lhoods = []
    for time in range(0, self.num_time_slices):
        lda_model = self.make_lda_seq_slice(lda_model, time)  # create the lda_seq slice for this time
        lhood = LdaPost.fit_lda_post(ldapost, 0, time, self)
        time_lhoods.append(lhood)
    doc_topic = ldapost.gamma / ldapost.gamma.sum()
    return doc_topic
'Updates the Zeta variational parameter. Zeta is described in the appendix; for each time-slice it equals the sum over all words of exp(mean[word] + Variance[word] / 2). It is the value of the variational parameter zeta which maximizes the lower bound.'
def update_zeta(self):
    for j, val in enumerate(self.zeta):
        self.zeta[j] = np.sum(np.exp(self.mean[:, j + 1] + self.variance[:, j + 1] / 2))
    return self.zeta
'Based on the Variational Kalman Filtering approach for Approximate Inference [https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf]. This function accepts the word to compute variance for, along with the associated sslm class object, and returns `variance` and `fwd_variance`. It computes Var[beta_{t,w}] for t = 1:T. Forward: fwd_variance[t] = c * (fwd_variance[t-1] + chain_variance), with c = obs_variance / (fwd_variance[t-1] + chain_variance + obs_variance). Backward: variance[t] = c * (variance[t+1] - chain_variance) + (1 - c) * fwd_variance[t], with c = (fwd_variance[t] / (fwd_variance[t] + chain_variance))^2.'
def compute_post_variance(self, word, chain_variance):
INIT_VARIANCE_CONST = 1000 T = self.num_time_slices variance = self.variance[word] fwd_variance = self.fwd_variance[word] fwd_variance[0] = (chain_variance * INIT_VARIANCE_CONST) for t in range(1, (T + 1)): if self.obs_variance: c = (self.obs_variance / ((fwd_variance[(t - 1)] + chain_variance) + self.obs_variance)) else: c = 0 fwd_variance[t] = (c * (fwd_variance[(t - 1)] + chain_variance)) variance[T] = fwd_variance[T] for t in range((T - 1), (-1), (-1)): if (fwd_variance[t] > 0.0): c = np.power((fwd_variance[t] / (fwd_variance[t] + chain_variance)), 2) else: c = 0 variance[t] = ((c * (variance[(t + 1)] - chain_variance)) + ((1 - c) * fwd_variance[t])) return (variance, fwd_variance)
'Based on the Variational Kalman Filtering approach for Approximate Inference [https://www.cs.princeton.edu/~blei/papers/BleiLafferty2006a.pdf]. This function accepts the word to compute the mean for, along with the associated sslm class object, and returns `mean` and `fwd_mean`. It is essentially a forward-backward pass to compute E[beta_{t,w}] for t = 1:T. Forward: fwd_mean[t] = c * fwd_mean[t-1] + (1 - c) * obs[t-1], with c = obs_variance / (fwd_variance[t-1] + chain_variance + obs_variance). Backward: mean[t] = c * fwd_mean[t] + (1 - c) * mean[t+1], with c = chain_variance / (fwd_variance[t] + chain_variance).'
def compute_post_mean(self, word, chain_variance):
T = self.num_time_slices obs = self.obs[word] fwd_variance = self.fwd_variance[word] mean = self.mean[word] fwd_mean = self.fwd_mean[word] fwd_mean[0] = 0 for t in range(1, (T + 1)): c = (self.obs_variance / ((fwd_variance[(t - 1)] + chain_variance) + self.obs_variance)) fwd_mean[t] = ((c * fwd_mean[(t - 1)]) + ((1 - c) * obs[(t - 1)])) mean[T] = fwd_mean[T] for t in range((T - 1), (-1), (-1)): if (chain_variance == 0.0): c = 0.0 else: c = (chain_variance / (fwd_variance[t] + chain_variance)) mean[t] = ((c * fwd_mean[t]) + ((1 - c) * mean[(t + 1)])) return (mean, fwd_mean)
'Compute the expected log probability given values of m. The appendix describes the Expectation of log-probabilities in equation 5 of the DTM paper; The below implementation is the result of solving the equation and is as implemented in the original Blei DTM code.'
def compute_expected_log_prob(self):
    for (w, t), val in np.ndenumerate(self.e_log_prob):
        self.e_log_prob[w][t] = self.mean[w][t + 1] - np.log(self.zeta[t])
    return self.e_log_prob
'Initialize the State Space Language Model with LDA sufficient statistics. Called for each topic-chain; initializes the initial mean, variance and topic-word probabilities for the first time-slice.'
def sslm_counts_init(self, obs_variance, chain_variance, sstats):
W = self.vocab_len T = self.num_time_slices log_norm_counts = np.copy(sstats) log_norm_counts = (log_norm_counts / sum(log_norm_counts)) log_norm_counts = (log_norm_counts + (1.0 / W)) log_norm_counts = (log_norm_counts / sum(log_norm_counts)) log_norm_counts = np.log(log_norm_counts) self.obs = np.repeat(log_norm_counts, T, axis=0).reshape(W, T) self.obs_variance = obs_variance self.chain_variance = chain_variance for w in range(0, W): (self.variance[w], self.fwd_variance[w]) = self.compute_post_variance(w, self.chain_variance) (self.mean[w], self.fwd_mean[w]) = self.compute_post_mean(w, self.chain_variance) self.zeta = self.update_zeta() self.e_log_prob = self.compute_expected_log_prob()
'Fits the variational distribution. This is essentially the M-step. Accepts the sstats of a particular topic as input and maximizes the values for that topic. Updates the values using the update_obs() and compute_expected_log_prob() methods.'
def fit_sslm(self, sstats):
W = self.vocab_len bound = 0 old_bound = 0 sslm_fit_threshold = 1e-06 sslm_max_iter = 2 converged = (sslm_fit_threshold + 1) totals = np.zeros(sstats.shape[1]) (self.variance, self.fwd_variance) = map(np.array, list(zip(*[self.compute_post_variance(w, self.chain_variance) for w in range(0, W)]))) totals = sstats.sum(axis=0) iter_ = 0 model = 'DTM' if (model == 'DTM'): bound = self.compute_bound(sstats, totals) if (model == 'DIM'): bound = self.compute_bound_fixed(sstats, totals) logger.info('initial sslm bound is %f', bound) while ((converged > sslm_fit_threshold) and (iter_ < sslm_max_iter)): iter_ += 1 old_bound = bound (self.obs, self.zeta) = self.update_obs(sstats, totals) if (model == 'DTM'): bound = self.compute_bound(sstats, totals) if (model == 'DIM'): bound = self.compute_bound_fixed(sstats, totals) converged = np.fabs(((bound - old_bound) / old_bound)) logger.info('iteration %i iteration lda seq bound is %f convergence is %f', iter_, bound, converged) self.e_log_prob = self.compute_expected_log_prob() return bound
'Compute the log probability bound. The formula is as described in the appendix of the DTM paper by Blei (formula no. 5).'
def compute_bound(self, sstats, totals):
W = self.vocab_len T = self.num_time_slices term_1 = 0 term_2 = 0 term_3 = 0 val = 0 ent = 0 chain_variance = self.chain_variance (self.mean, self.fwd_mean) = map(np.array, zip(*[self.compute_post_mean(w, self.chain_variance) for w in range(0, W)])) self.zeta = self.update_zeta() for w in range(0, W): val += (((self.variance[w][0] - self.variance[w][T]) / 2) * chain_variance) logger.info('Computing bound, all times') for t in range(1, (T + 1)): term_1 = 0.0 term_2 = 0.0 ent = 0.0 for w in range(0, W): m = self.mean[w][t] prev_m = self.mean[w][(t - 1)] v = self.variance[w][t] term_1 += (((np.power((m - prev_m), 2) / (2 * chain_variance)) - (v / chain_variance)) - np.log(chain_variance)) term_2 += (sstats[w][(t - 1)] * m) ent += (np.log(v) / 2) term_3 = ((- totals[(t - 1)]) * np.log(self.zeta[(t - 1)])) val += (((term_2 + term_3) + ent) - term_1) return val
'Function to perform optimization of obs. Parameters are suff_stats set up in the fit_sslm method. TODO: This is by far the slowest function in the whole algorithm. Replacing or improving the performance of this would greatly speed things up.'
def update_obs(self, sstats, totals):
OBS_NORM_CUTOFF = 2 STEP_SIZE = 0.01 TOL = 0.001 W = self.vocab_len T = self.num_time_slices runs = 0 mean_deriv_mtx = np.resize(np.zeros((T * (T + 1))), (T, (T + 1))) norm_cutoff_obs = None for w in range(0, W): w_counts = sstats[w] counts_norm = 0 for i in range(0, len(w_counts)): counts_norm += (w_counts[i] * w_counts[i]) counts_norm = np.sqrt(counts_norm) if ((counts_norm < OBS_NORM_CUTOFF) and (norm_cutoff_obs is not None)): obs = self.obs[w] norm_cutoff_obs = np.copy(obs) else: if (counts_norm < OBS_NORM_CUTOFF): w_counts = np.zeros(len(w_counts)) for t in range(0, T): mean_deriv = mean_deriv_mtx[t] mean_deriv = self.compute_mean_deriv(w, t, mean_deriv) mean_deriv_mtx[t] = mean_deriv deriv = np.zeros(T) args = (self, w_counts, totals, mean_deriv_mtx, w, deriv) obs = self.obs[w] model = 'DTM' if (model == 'DTM'): obs = optimize.fmin_cg(f=f_obs, fprime=df_obs, x0=obs, gtol=TOL, args=args, epsilon=STEP_SIZE, disp=0) if (model == 'DIM'): pass runs += 1 if (counts_norm < OBS_NORM_CUTOFF): norm_cutoff_obs = obs self.obs[w] = obs self.zeta = self.update_zeta() return (self.obs, self.zeta)
'Helper used while finding the optimum. Computes the derivative d E[eta_{t,w}] / d obs_{s,w} for t = 1:T and puts the result in `deriv`, a pre-allocated vector of length T+1.'
def compute_mean_deriv(self, word, time, deriv):
T = self.num_time_slices fwd_variance = self.variance[word] deriv[0] = 0 for t in range(1, (T + 1)): if (self.obs_variance > 0.0): w = (self.obs_variance / ((fwd_variance[(t - 1)] + self.chain_variance) + self.obs_variance)) else: w = 0.0 val = (w * deriv[(t - 1)]) if (time == (t - 1)): val += (1 - w) deriv[t] = val for t in range((T - 1), (-1), (-1)): if (self.chain_variance == 0.0): w = 0.0 else: w = (self.chain_variance / (fwd_variance[t] + self.chain_variance)) deriv[t] = ((w * deriv[t]) + ((1 - w) * deriv[(t + 1)])) return deriv
'Derivative of obs, used in the derivative function [df_obs] while optimizing.'
def compute_obs_deriv(self, word, word_counts, totals, mean_deriv_mtx, deriv):
init_mult = 1000 T = self.num_time_slices mean = self.mean[word] variance = self.variance[word] self.temp_vect = np.zeros(T) for u in range(0, T): self.temp_vect[u] = np.exp((mean[(u + 1)] + (variance[(u + 1)] / 2))) for t in range(0, T): mean_deriv = mean_deriv_mtx[t] term1 = 0 term2 = 0 term3 = 0 term4 = 0 for u in range(1, (T + 1)): mean_u = mean[u] variance_u_prev = variance[(u - 1)] mean_u_prev = mean[(u - 1)] dmean_u = mean_deriv[u] dmean_u_prev = mean_deriv[(u - 1)] term1 += ((mean_u - mean_u_prev) * (dmean_u - dmean_u_prev)) term2 += ((word_counts[(u - 1)] - ((totals[(u - 1)] * self.temp_vect[(u - 1)]) / self.zeta[(u - 1)])) * dmean_u) model = 'DTM' if (model == 'DIM'): pass if self.chain_variance: term1 = (- (term1 / self.chain_variance)) term1 = (term1 - ((mean[0] * mean_deriv[0]) / (init_mult * self.chain_variance))) else: term1 = 0.0 deriv[t] = (((term1 + term2) + term3) + term4) return deriv
'Update variational multinomial parameters, based on a document and a time-slice. This is done based on the original Blei-LDA paper, where: log_phi := beta * exp(Ψ(gamma)), over every topic for every word. TODO: incorporate the Lee-Seung trick used in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.'
def update_phi(self, doc_number, time):
num_topics = self.lda.num_topics dig = np.zeros(num_topics) for k in range(0, num_topics): dig[k] = digamma(self.gamma[k]) n = 0 for (word_id, count) in self.doc: for k in range(0, num_topics): self.log_phi[n][k] = (dig[k] + self.lda.topics[word_id][k]) log_phi_row = self.log_phi[n] phi_row = self.phi[n] v = log_phi_row[0] for i in range(1, len(log_phi_row)): v = np.logaddexp(v, log_phi_row[i]) log_phi_row = (log_phi_row - v) phi_row = np.exp(log_phi_row) self.log_phi[n] = log_phi_row self.phi[n] = phi_row n += 1 return (self.phi, self.log_phi)
'update variational dirichlet parameters as described in the original Blei LDA paper: gamma = alpha + sum(phi), over every topic for every word.'
def update_gamma(self):
    self.gamma = np.copy(self.lda.alpha)
    n = 0
    for word_id, count in self.doc:
        phi_row = self.phi[n]
        for k in range(0, self.lda.num_topics):
            self.gamma[k] += phi_row[k] * count
        n += 1
    return self.gamma
'Initialize variational posterior, does not return anything.'
def init_lda_post(self):
    total = sum(count for word_id, count in self.doc)
    self.gamma.fill(self.lda.alpha[0] + float(total) / self.lda.num_topics)
    self.phi[:len(self.doc), :] = 1.0 / self.lda.num_topics
'compute the likelihood bound'
def compute_lda_lhood(self):
num_topics = self.lda.num_topics gamma_sum = np.sum(self.gamma) lhood = (gammaln(np.sum(self.lda.alpha)) - gammaln(gamma_sum)) self.lhood[num_topics] = lhood digsum = digamma(gamma_sum) model = 'DTM' for k in range(0, num_topics): e_log_theta_k = (digamma(self.gamma[k]) - digsum) lhood_term = ((((self.lda.alpha[k] - self.gamma[k]) * e_log_theta_k) + gammaln(self.gamma[k])) - gammaln(self.lda.alpha[k])) n = 0 for (word_id, count) in self.doc: if (self.phi[n][k] > 0): lhood_term += ((count * self.phi[n][k]) * ((e_log_theta_k + self.lda.topics[word_id][k]) - self.log_phi[n][k])) n += 1 self.lhood[k] = lhood_term lhood += lhood_term return lhood
'Posterior inference for lda. g, g3, g4 and g5 are matrices used in Document Influence Model and not used currently.'
def fit_lda_post(self, doc_number, time, ldaseq, LDA_INFERENCE_CONVERGED=1e-08, lda_inference_max_iter=25, g=None, g3_matrix=None, g4_matrix=None, g5_matrix=None):
self.init_lda_post() total = sum((count for (word_id, count) in self.doc)) model = 'DTM' if (model == 'DIM'): pass lhood = self.compute_lda_lhood() lhood_old = 0 converged = 0 iter_ = 0 iter_ += 1 lhood_old = lhood self.gamma = self.update_gamma() model = 'DTM' if ((model == 'DTM') or (sslm is None)): (self.phi, self.log_phi) = self.update_phi(doc_number, time) elif ((model == 'DIM') and (sslm is not None)): (self.phi, self.log_phi) = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix) lhood = self.compute_lda_lhood() converged = np.fabs(((lhood_old - lhood) / (lhood_old * total))) while ((converged > LDA_INFERENCE_CONVERGED) and (iter_ <= lda_inference_max_iter)): iter_ += 1 lhood_old = lhood self.gamma = self.update_gamma() model = 'DTM' if ((model == 'DTM') or (sslm is None)): (self.phi, self.log_phi) = self.update_phi(doc_number, time) elif ((model == 'DIM') and (sslm is not None)): (self.phi, self.log_phi) = self.update_phi_fixed(doc_number, time, sslm, g3_matrix, g4_matrix, g5_matrix) lhood = self.compute_lda_lhood() converged = np.fabs(((lhood_old - lhood) / (lhood_old * total))) return lhood
'Update lda sequence sufficient statistics from an lda posterior. This is very similar to the update_gamma method and uses the same formula.'
def update_lda_seq_ss(self, time, doc, topic_suffstats):
    num_topics = self.lda.num_topics
    for k in range(0, num_topics):
        topic_ss = topic_suffstats[k]
        n = 0
        for word_id, count in self.doc:
            topic_ss[word_id][time] += count * self.phi[n][k]
            n += 1
        topic_suffstats[k] = topic_ss
    return topic_suffstats
'Return representation with the ids transformed.'
def __getitem__(self, bow):
    is_corpus, bow = utils.is_corpus(bow)
    if is_corpus:
        return self._apply(bow)
    return sorted(
        (self.old2new[oldid], weight)
        for oldid, weight in bow
        if oldid in self.old2new
    )
'Compute tf-idf by multiplying a local component (term frequency) with a global component (inverse document frequency), and normalizing the resulting documents to unit length. Formula for unnormalized weight of term `i` in document `j` in a corpus of D documents:: weight_{i,j} = frequency_{i,j} * log_2(D / document_freq_{i}) or, more generally:: weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document_freq_{i}, D) so you can plug in your own custom `wlocal` and `wglobal` functions. Default for `wlocal` is identity (other options: math.sqrt, math.log1p, ...) and default for `wglobal` is `log_2(total_docs / doc_freq)`, giving the formula above. `normalize` dictates how the final transformed vectors will be normalized. `normalize=True` means set to unit length (default); `False` means don\'t normalize. You can also set `normalize` to your own function that accepts and returns a sparse vector. If `dictionary` is specified, it must be a `corpora.Dictionary` object and it will be used to directly construct the inverse document frequency mapping (then `corpus`, if specified, is ignored).'
def __init__(self, corpus=None, id2word=None, dictionary=None, wlocal=utils.identity, wglobal=df2idf, normalize=True):
self.normalize = normalize self.id2word = id2word (self.wlocal, self.wglobal) = (wlocal, wglobal) (self.num_docs, self.num_nnz, self.idfs) = (None, None, None) if (dictionary is not None): if (corpus is not None): logger.warning('constructor received both corpus and explicit inverse document frequencies; ignoring the corpus') (self.num_docs, self.num_nnz) = (dictionary.num_docs, dictionary.num_nnz) self.dfs = dictionary.dfs.copy() self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs) if (id2word is None): self.id2word = dictionary elif (corpus is not None): self.initialize(corpus) else: pass
'Compute inverse document weights, which will be used to modify term frequencies for documents.'
def initialize(self, corpus):
    logger.info('collecting document frequencies')
    dfs = {}
    numnnz, docno = 0, -1
    for docno, bow in enumerate(corpus):
        if docno % 10000 == 0:
            logger.info('PROGRESS: processing document #%i', docno)
        numnnz += len(bow)
        for termid, _ in bow:
            dfs[termid] = dfs.get(termid, 0) + 1
    # keep some stats about the training corpus
    self.num_docs = docno + 1
    self.num_nnz = numnnz
    self.dfs = dfs
    # and finally compute the idf weights
    n_features = max(dfs) if dfs else 0
    logger.info(
        'calculating IDF weights for %i documents and %i features (%i matrix non-zeros)',
        self.num_docs, n_features, self.num_nnz
    )
    self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
'Return tf-idf representation of the input vector and/or corpus.'
def __getitem__(self, bow, eps=1e-12):
    # if the input is in fact a corpus, return a transformed corpus as a result
    is_corpus, bow = utils.is_corpus(bow)
    if is_corpus:
        return self._apply(bow)
    # unknown (new) terms get zero weight
    vector = [
        (termid, self.wlocal(tf) * self.idfs.get(termid))
        for termid, tf in bow
        if self.idfs.get(termid, 0.0) != 0.0
    ]
    # normalize the vector either to unit length, or with a user-defined function
    if self.normalize is True:
        vector = matutils.unitvec(vector)
    elif self.normalize:
        vector = self.normalize(vector)
    # make sure there are no explicit zeroes in the vector (it must stay sparse)
    vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
    return vector
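A usage sketch with a placeholder tokenized corpus: build the dictionary, the bag-of-words corpus and the TF-IDF model, then transform a single document.

from gensim import corpora, models

dictionary = corpora.Dictionary(my_tokenized_docs)            # my_tokenized_docs: list of token lists
bow_corpus = [dictionary.doc2bow(doc) for doc in my_tokenized_docs]
tfidf = models.TfidfModel(bow_corpus)                         # or TfidfModel(dictionary=dictionary)
print(tfidf[bow_corpus[0]])                                   # [(term_id, weight), ...], unit length by default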
'Initialize the model from an iterable of `sentences`. Each sentence must be a list of words (unicode strings) that will be used for training. The `sentences` iterable can be simply a list, but for larger corpora, consider a generator that streams the sentences directly from disk/network, without storing everything in RAM. See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in the :mod:`gensim.models.word2vec` module for such examples. `min_count` ignores all words and bigrams with total collected count lower than this. `threshold` represents a score threshold for forming the phrases (higher means fewer phrases); a phrase of words `a` followed by `b` is accepted if the score of the phrase is greater than threshold. See the `scoring` setting. `max_vocab_size` is the maximum size of the vocabulary, used to control pruning of less common words to keep memory under control. The default of 40M needs about 3.6GB of RAM; increase/decrease `max_vocab_size` depending on how much available memory you have. `delimiter` is the glue character used to join collocation tokens, and should be a byte string (e.g. b\'_\'). `scoring` specifies how potential phrases are scored for comparison to the `threshold` setting. Two settings are available: \'default\': from "Efficient Estimation of Word Representations in Vector Space" by Mikolov et al.: (count(worda followed by wordb) - min_count) * N / (count(worda) * count(wordb)) > threshold, where `N` is the total vocabulary size; \'npmi\': normalized pointwise mutual information, from "Normalized (Pointwise) Mutual Information in Collocation Extraction" by Gerlof Bouma: ln(prop(worda followed by wordb) / (prop(worda) * prop(wordb))) / -ln(prop(worda followed by wordb)), where prop(n) is the count of n divided by the count of everything in the entire corpus. \'npmi\' is more robust when dealing with common words that form part of common bigrams, and ranges from -1 to 1, but is slower to calculate than the default.'
def __init__(self, sentences=None, min_count=5, threshold=10.0, max_vocab_size=40000000, delimiter='_', progress_per=10000, scoring='default'):
    if min_count <= 0:
        raise ValueError('min_count should be at least 1')
    if threshold <= 0 and scoring == 'default':
        raise ValueError('threshold should be positive for default scoring')
    if scoring == 'npmi' and (threshold < -1 or threshold > 1):
        raise ValueError('threshold should be between -1 and 1 for npmi scoring')
    if not (scoring == 'default' or scoring == 'npmi'):
        raise ValueError('unknown scoring function "' + scoring + '" specified')
    self.min_count = min_count
    self.threshold = threshold
    self.max_vocab_size = max_vocab_size
    self.vocab = defaultdict(int)  # mapping between token => its count
    self.min_reduce = 1  # ignore any tokens with count smaller than this
    self.delimiter = delimiter
    self.progress_per = progress_per
    self.scoring = scoring
    self.corpus_word_count = 0
    if sentences is not None:
        self.add_vocab(sentences)
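A sketch using the npmi scorer (the threshold must then lie in [-1, 1]); `my_sentences` is a placeholder corpus and whether the bigram is detected depends entirely on the training data.

from gensim.models.phrases import Phrases, Phraser

bigram = Phrases(my_sentences, min_count=5, threshold=0.5, scoring='npmi')
bigram_phraser = Phraser(bigram)                     # lighter-weight object for pure transformation
print(bigram_phraser[['new', 'york', 'is', 'big']])  # e.g. ['new_york', 'is', 'big'] if the bigram was learned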
'Get short string representation of this phrase detector.'
def __str__(self):
    return '%s<%i vocab, min_count=%s, threshold=%s, max_vocab_size=%s>' % (
        self.__class__.__name__, len(self.vocab), self.min_count,
        self.threshold, self.max_vocab_size
    )
'Collect unigram/bigram counts from the `sentences` iterable.'
@staticmethod
def learn_vocab(sentences, max_vocab_size, delimiter='_', progress_per=10000):
sentence_no = (-1) total_words = 0 logger.info('collecting all words and their counts') vocab = defaultdict(int) min_reduce = 1 for (sentence_no, sentence) in enumerate(sentences): if ((sentence_no % progress_per) == 0): logger.info(('PROGRESS: at sentence #%i, processed %i words and %i word types' % (sentence_no, total_words, len(vocab)))) sentence = [utils.any2utf8(w) for w in sentence] for bigram in zip(sentence, sentence[1:]): vocab[bigram[0]] += 1 vocab[delimiter.join(bigram)] += 1 total_words += 1 if sentence: word = sentence[(-1)] vocab[word] += 1 total_words += 1 if (len(vocab) > max_vocab_size): utils.prune_vocab(vocab, min_reduce) min_reduce += 1 logger.info(('collected %i word types from a corpus of %i words (unigram + bigrams) and %i sentences' % (len(vocab), total_words, (sentence_no + 1)))) return (min_reduce, vocab, total_words)
'Merge the collected counts `vocab` into this phrase detector.'
def add_vocab(self, sentences):
(min_reduce, vocab, total_words) = self.learn_vocab(sentences, self.max_vocab_size, self.delimiter, self.progress_per) self.corpus_word_count += total_words if (len(self.vocab) > 0): logger.info('merging %i counts into %s', len(vocab), self) self.min_reduce = max(self.min_reduce, min_reduce) for (word, count) in iteritems(vocab): self.vocab[word] += count if (len(self.vocab) > self.max_vocab_size): utils.prune_vocab(self.vocab, self.min_reduce) self.min_reduce += 1 logger.info('merged %s', self) else: logger.info('using %i counts as vocab in %s', len(vocab), self) self.vocab = vocab
'Generate an iterator over all phrases detected in the given `sentences`. Example:: >>> sentences = Text8Corpus(path_to_corpus) >>> bigram = Phrases(sentences, min_count=5, threshold=100) >>> for phrase, score in bigram.export_phrases(sentences): ... print(u\'{0} {1}\'.format(phrase, score)) You can then use the generated output (e.g. dumped to a TSV file) to tune the threshold.'
def export_phrases(self, sentences, out_delimiter=' ', as_tuples=False):
vocab = self.vocab threshold = self.threshold delimiter = self.delimiter min_count = self.min_count scoring = self.scoring corpus_word_count = self.corpus_word_count if (scoring == 'default'): scoring_function = partial(self.original_scorer, len_vocab=float(len(vocab)), min_count=float(min_count)) elif (scoring == 'npmi'): scoring_function = partial(self.npmi_scorer, corpus_word_count=corpus_word_count) for sentence in sentences: s = [utils.any2utf8(w) for w in sentence] last_bigram = False for (word_a, word_b) in zip(s, s[1:]): if ((word_a in vocab) and (word_b in vocab) and (not last_bigram)): bigram_word = delimiter.join((word_a, word_b)) if (bigram_word in vocab): count_a = float(vocab[word_a]) count_b = float(vocab[word_b]) count_ab = float(vocab[bigram_word]) score = scoring_function(count_a, count_b, count_ab) if ((score > threshold) and (count_ab >= min_count)): if as_tuples: (yield ((word_a, word_b), score)) else: (yield (out_delimiter.join((word_a, word_b)), score)) last_bigram = True continue last_bigram = False
'Convert the input tokens `sentence` (=list of unicode strings) into phrase tokens (=list of unicode strings, where detected phrases are joined by u\'_\'). If `sentence` is an entire corpus (iterable of sentences rather than a single sentence), return an iterable that converts each of the corpus\' sentences into phrases on the fly, one after another. Example:: >>> sentences = Text8Corpus(path_to_corpus) >>> bigram = Phrases(sentences, min_count=5, threshold=100) >>> for sentence in bigram[sentences]: ... print(u\' \'.join(sentence)) he refuted nechaev other anarchists sometimes identified as pacifist anarchists advocated complete nonviolence leo_tolstoy'
def __getitem__(self, sentence):
warnings.warn('For a faster implementation, use the gensim.models.phrases.Phraser class') (is_single, sentence) = _is_single(sentence) if (not is_single): return self._apply(sentence) (s, new_s) = ([utils.any2utf8(w) for w in sentence], []) last_bigram = False vocab = self.vocab threshold = self.threshold delimiter = self.delimiter min_count = self.min_count for (word_a, word_b) in zip(s, s[1:]): if ((word_a in vocab) and (word_b in vocab)): bigram_word = delimiter.join((word_a, word_b)) if ((bigram_word in vocab) and (not last_bigram)): pa = float(vocab[word_a]) pb = float(vocab[word_b]) pab = float(vocab[bigram_word]) score = ((((pab - min_count) / pa) / pb) * len(vocab)) if (score > threshold): new_s.append(bigram_word) last_bigram = True continue if (not last_bigram): new_s.append(word_a) last_bigram = False if s: last_token = s[(-1)] if (not last_bigram): new_s.append(last_token) return [utils.to_unicode(w) for w in new_s]
'Convert the input tokens `sentence` (=list of unicode strings) into phrase tokens (=list of unicode strings, where detected phrases are joined by u\'_\' (or other configured delimiter-character). If `sentence` is an entire corpus (iterable of sentences rather than a single sentence), return an iterable that converts each of the corpus\' sentences into phrases on the fly, one after another.'
def __getitem__(self, sentence):
(is_single, sentence) = _is_single(sentence) if (not is_single): return self._apply(sentence) (s, new_s) = ([utils.any2utf8(w) for w in sentence], []) last_bigram = False phrasegrams = self.phrasegrams delimiter = self.delimiter for (word_a, word_b) in zip(s, s[1:]): bigram_tuple = (word_a, word_b) if ((phrasegrams.get(bigram_tuple, ((-1), (-1)))[1] > self.threshold) and (not last_bigram)): bigram_word = delimiter.join((word_a, word_b)) new_s.append(bigram_word) last_bigram = True continue if (not last_bigram): new_s.append(word_a) last_bigram = False if s: last_token = s[(-1)] if (not last_bigram): new_s.append(last_token) return [utils.to_unicode(w) for w in new_s]
'Construct the (U, S) projection from a corpus `docs`. The projection can be later updated by merging it with another Projection via `self.merge()`. This is the class taking care of the \'core math\'; interfacing with corpora, splitting large corpora into chunks and merging them etc. is done through the higher-level `LsiModel` class.'
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
(self.m, self.k) = (m, k) self.power_iters = power_iters self.extra_dims = extra_dims if (docs is not None): if (not use_svdlibc): (u, s) = stochastic_svd(docs, k, chunksize=sys.maxsize, num_terms=m, power_iters=self.power_iters, extra_dims=self.extra_dims) else: try: import sparsesvd except ImportError: raise ImportError('`sparsesvd` module requested but not found; run `easy_install sparsesvd`') logger.info('computing sparse SVD of %s matrix', str(docs.shape)) if (not scipy.sparse.issparse(docs)): docs = matutils.corpus2csc(docs) (ut, s, vt) = sparsesvd.sparsesvd(docs, (k + 30)) u = ut.T del ut, vt k = clip_spectrum((s ** 2), self.k) self.u = u[:, :k].copy() self.s = s[:k].copy() else: (self.u, self.s) = (None, None)
'Merge this Projection with another. The content of `other` is destroyed in the process, so pass this function a copy of `other` if you need it further.'
def merge(self, other, decay=1.0):
if (other.u is None): return if (self.u is None): self.u = other.u.copy() self.s = other.s.copy() return if (self.m != other.m): raise ValueError(('vector space mismatch: update is using %s features, expected %s' % (other.m, self.m))) logger.info('merging projections: %s + %s', str(self.u.shape), str(other.u.shape)) (m, n1, n2) = (self.u.shape[0], self.u.shape[1], other.u.shape[1]) logger.debug('constructing orthogonal component') self.u = asfarray(self.u, 'self.u') c = np.dot(self.u.T, other.u) self.u = ascarray(self.u, 'self.u') other.u -= np.dot(self.u, c) other.u = [other.u] (q, r) = matutils.qr_destroy(other.u) assert (not other.u) k = np.bmat([[np.diag((decay * self.s)), np.multiply(c, other.s)], [matutils.pad(np.array([]).reshape(0, 0), min(m, n2), n1), np.multiply(r, other.s)]]) logger.debug('computing SVD of %s dense matrix', k.shape) try: (u_k, s_k, _) = scipy.linalg.svd(k, full_matrices=False) except scipy.linalg.LinAlgError: logger.error('SVD(A) failed; trying SVD(A * A^T)') (u_k, s_k, _) = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False) s_k = np.sqrt(s_k) k = clip_spectrum((s_k ** 2), self.k) (u1_k, u2_k, s_k) = (np.array(u_k[:n1, :k]), np.array(u_k[n1:, :k]), s_k[:k]) logger.debug('updating orthonormal basis U') self.s = s_k self.u = ascarray(self.u, 'self.u') self.u = np.dot(self.u, u1_k) q = ascarray(q, 'q') q = np.dot(q, u2_k) self.u += q if (self.u.shape[0] > 0): for i in xrange(self.u.shape[1]): if (self.u[(0, i)] < 0.0): self.u[:, i] *= (-1.0)
'`num_topics` is the number of requested factors (latent dimensions). After the model has been trained, you can estimate topics for an arbitrary, unseen document, using the ``topics = self[document]`` dictionary notation. You can also add new training documents, with ``self.add_documents``, so that training can be stopped and resumed at any time, and the LSI transformation is available at any point. If you specify a `corpus`, it will be used to train the model. See the method `add_documents` for a description of the `chunksize` and `decay` parameters. Turn `onepass` off to force a multi-pass stochastic algorithm. `power_iters` and `extra_samples` affect the accuracy of the stochastic multi-pass algorithm, which is used either internally (`onepass=True`) or as the front-end algorithm (`onepass=False`). Increasing the number of power iterations improves accuracy, but lowers performance. See [3]_ for some hard numbers. Turn on `distributed` to enable distributed computing. Example: >>> lsi = LsiModel(corpus, num_topics=10) >>> print(lsi[doc_tfidf]) # project some document into LSI space >>> lsi.add_documents(corpus2) # update LSI on additional documents >>> print(lsi[doc_tfidf]) .. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf'
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000, decay=1.0, distributed=False, onepass=True, power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
self.id2word = id2word self.num_topics = int(num_topics) self.chunksize = int(chunksize) self.decay = float(decay) if distributed: if (not onepass): logger.warning('forcing the one-pass algorithm for distributed LSA') onepass = True self.onepass = onepass (self.extra_samples, self.power_iters) = (extra_samples, power_iters) if ((corpus is None) and (self.id2word is None)): raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality') if (self.id2word is None): logger.warning('no word id mapping provided; initializing from corpus, assuming identity') self.id2word = utils.dict_from_corpus(corpus) self.num_terms = len(self.id2word) else: self.num_terms = (1 + (max(self.id2word.keys()) if self.id2word else (-1))) self.docs_processed = 0 self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples) self.numworkers = 1 if (not distributed): logger.info('using serial LSI version on this node') self.dispatcher = None else: if (not onepass): raise NotImplementedError('distributed stochastic LSA not implemented yet; run either distributed one-pass, or serial randomized.') try: import Pyro4 dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher') logger.debug('looking for dispatcher at %s', str(dispatcher._pyroUri)) dispatcher.initialize(id2word=self.id2word, num_topics=num_topics, chunksize=chunksize, decay=decay, power_iters=self.power_iters, extra_samples=self.extra_samples, distributed=False, onepass=onepass) self.dispatcher = dispatcher self.numworkers = len(dispatcher.getworkers()) logger.info('using distributed version with %i workers', self.numworkers) except Exception as err: logger.error('failed to initialize distributed LSI (%s)', err) raise RuntimeError(('failed to initialize distributed LSI (%s)' % err)) if (corpus is not None): self.add_documents(corpus)
'Update singular value decomposition to take into account a new corpus of documents. Training proceeds in chunks of `chunksize` documents at a time. The size of `chunksize` is a tradeoff between increased speed (bigger `chunksize`) vs. lower memory footprint (smaller `chunksize`). If the distributed mode is on, each chunk is sent to a different worker/computer. Setting `decay` < 1.0 causes re-orientation towards new data trends in the input document stream, by giving less emphasis to old observations. This allows LSA to gradually "forget" old observations (documents) and give more preference to new ones.'
def add_documents(self, corpus, chunksize=None, decay=None):
logger.info('updating model with new documents') if (chunksize is None): chunksize = self.chunksize if (decay is None): decay = self.decay if (not scipy.sparse.issparse(corpus)): if (not self.onepass): update = Projection(self.num_terms, self.num_topics, None) (update.u, update.s) = stochastic_svd(corpus, self.num_topics, num_terms=self.num_terms, chunksize=chunksize, extra_dims=self.extra_samples, power_iters=self.power_iters) self.projection.merge(update, decay=decay) self.docs_processed += (len(corpus) if hasattr(corpus, '__len__') else 0) else: doc_no = 0 if self.dispatcher: logger.info('initializing %s workers', self.numworkers) self.dispatcher.reset() for (chunk_no, chunk) in enumerate(utils.grouper(corpus, chunksize)): logger.info('preparing a new chunk of documents') nnz = sum((len(doc) for doc in chunk)) logger.debug('converting corpus to csc format') job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz) del chunk doc_no += job.shape[1] if self.dispatcher: logger.debug('creating job #%i', chunk_no) self.dispatcher.putjob(job) del job logger.info('dispatched documents up to #%s', doc_no) else: update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters) del job self.projection.merge(update, decay=decay) del update logger.info('processed documents up to #%s', doc_no) self.print_topics(5) if self.dispatcher: logger.info('reached the end of input; now waiting for all remaining jobs to finish') self.projection = self.dispatcher.getstate() self.docs_processed += doc_no else: assert (not self.dispatcher), 'must be in serial mode to receive jobs' assert self.onepass, 'distributed two-pass algo not supported yet' update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters) self.projection.merge(update, decay=decay) logger.info('processed sparse job of %i documents', corpus.shape[1]) self.docs_processed += corpus.shape[1]
'Return the latent representation, as a list of (topic_id, topic_value) 2-tuples. This is done by folding the input document into the latent topic space. If `scaled` is set, scale topics by the inverse of singular values (default: no scaling).'
def __getitem__(self, bow, scaled=False, chunksize=512):
assert (self.projection.u is not None), 'decomposition not initialized yet' (is_corpus, bow) = utils.is_corpus(bow) if (is_corpus and chunksize): return self._apply(bow, chunksize=chunksize) if (not is_corpus): bow = [bow] vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype) topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T if (not is_corpus): topic_dist = topic_dist.reshape((-1)) if scaled: topic_dist = ((1.0 / self.projection.s[:self.num_topics]) * topic_dist) if (not is_corpus): result = matutils.full2sparse(topic_dist) else: result = matutils.Dense2Corpus(topic_dist) return result
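A short sketch of folding new documents into the latent space via `__getitem__`, again reusing the hypothetical `lsi`, `dictionary` and `corpus` from above; the printed values are illustrative, not real output.

# Single document: returns a list of (topic_id, topic_value) 2-tuples.
new_doc = dictionary.doc2bow(["human", "computer", "system"])
print(lsi[new_doc])           # e.g. [(0, 0.46), (1, -0.07)]

# Whole corpus: returns a lazily transformed corpus, processed in chunks internally.
for vec in lsi[corpus]:
    print(vec)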
'Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`, as a string. Return only the `topn` words which contribute the most to the direction of the topic (both negative and positive). >>> lsimodel.show_topic(10, topn=5) [("category", -0.340), ("$M$", 0.298), ("algebra", 0.183), ("functor", -0.174), ("operator", -0.168)]'
def show_topic(self, topicno, topn=10):
if (topicno >= len(self.projection.u.T)): return '' c = np.asarray(self.projection.u.T[topicno, :]).flatten() norm = np.sqrt(np.sum(np.dot(c, c))) most = matutils.argsort(np.abs(c), topn, reverse=True) return [(self.id2word[val], ((1.0 * c[val]) / norm)) for val in most]
'Return `num_topics` most significant topics (return all by default). For each topic, show `num_words` most significant words (10 words by default). The topics are returned as a list -- a list of strings if `formatted` is True, or a list of `(word, weight)` 2-tuples if False (LSI topic weights are not probabilities and may be negative). If `log` is True, also output this result to log.'
def show_topics(self, num_topics=(-1), num_words=10, log=False, formatted=True):
shown = [] if (num_topics < 0): num_topics = self.num_topics for i in xrange(min(num_topics, self.num_topics)): if (i < len(self.projection.s)): if formatted: topic = self.print_topic(i, topn=num_words) else: topic = self.show_topic(i, topn=num_words) shown.append((i, topic)) if log: logger.info('topic #%i(%.3f): %s', i, self.projection.s[i], topic) return shown
'Print (to log) the most salient words of the first `num_topics` topics. Unlike `print_topics()`, this looks for words that are significant for a particular topic *and* not for others. This *should* result in a more human-interpretable description of topics.'
def print_debug(self, num_topics=5, num_words=10):
print_debug(self.id2word, self.projection.u, self.projection.s, range(min(num_topics, len(self.projection.u.T))), num_words=num_words)
'Save the model to file. Large internal arrays may be stored into separate files, with `fname` as prefix. Note: do not save as a compressed file if you intend to load the file back with `mmap`.'
def save(self, fname, *args, **kwargs):
if (self.projection is not None): self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs) super(LsiModel, self).save(fname, ignore=['projection', 'dispatcher'], *args, **kwargs)
'Load a previously saved object from file (also see `save`). Large arrays can be memmap\'ed back as read-only (shared memory) by setting `mmap=\'r\'`: >>> LsiModel.load(fname, mmap=\'r\')'
@classmethod def load(cls, fname, *args, **kwargs):
kwargs['mmap'] = kwargs.get('mmap', None) result = super(LsiModel, cls).load(fname, *args, **kwargs) projection_fname = utils.smart_extension(fname, '.projection') try: result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs) except Exception as e: logging.warning(('failed to load projection from %s: %s' % (projection_fname, e))) return result
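A sketch of the save/load round trip implied by the two methods above, reusing the hypothetical `lsi` model; the file name is a made-up temporary path. Because the projection is stored in a separate `<fname>.projection` file, it can be memory-mapped back read-only.

import os
import tempfile

from gensim.models import LsiModel

fname = os.path.join(tempfile.gettempdir(), "example.lsi")  # hypothetical path
lsi.save(fname)                          # writes example.lsi and example.lsi.projection
loaded = LsiModel.load(fname, mmap='r')  # large arrays shared read-only via mmap
print(loaded.num_topics)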
'`gamma`: first level concentration `alpha`: second level concentration `eta`: the topic Dirichlet `T`: top level truncation level `K`: second level truncation level `kappa`: learning rate `tau`: slow down parameter `max_time`: stop training after this many seconds `max_chunks`: stop after having processed this many chunks (wrap around corpus beginning in another corpus pass, if there are not enough chunks in the corpus)'
def __init__(self, corpus, id2word, max_chunks=None, max_time=None, chunksize=256, kappa=1.0, tau=64.0, K=15, T=150, alpha=1, gamma=1, eta=0.01, scale=1.0, var_converge=0.0001, outputdir=None, random_state=None):
self.corpus = corpus self.id2word = id2word self.chunksize = chunksize self.max_chunks = max_chunks self.max_time = max_time self.outputdir = outputdir self.random_state = utils.get_random_state(random_state) self.lda_alpha = None self.lda_beta = None self.m_W = len(id2word) self.m_D = 0 if corpus: self.m_D = len(corpus) self.m_T = T self.m_K = K self.m_alpha = alpha self.m_gamma = gamma self.m_var_sticks = np.zeros((2, (T - 1))) self.m_var_sticks[0] = 1.0 self.m_var_sticks[1] = range((T - 1), 0, (-1)) self.m_varphi_ss = np.zeros(T) self.m_lambda = ((((self.random_state.gamma(1.0, 1.0, (T, self.m_W)) * self.m_D) * 100) / (T * self.m_W)) - eta) self.m_eta = eta self.m_Elogbeta = dirichlet_expectation((self.m_eta + self.m_lambda)) self.m_tau = (tau + 1) self.m_kappa = kappa self.m_scale = scale self.m_updatect = 0 self.m_status_up_to_date = True self.m_num_docs_processed = 0 self.m_timestamp = np.zeros(self.m_W, dtype=int) self.m_r = [0] self.m_lambda_sum = np.sum(self.m_lambda, axis=1) self.m_var_converge = var_converge if self.outputdir: self.save_options() if (corpus is not None): self.update(corpus)
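A hedged construction sketch for HdpModel, reusing the hypothetical `corpus` and `dictionary` from the LSI examples above; the truncation levels shown are simply the defaults spelled out.

from gensim.models import HdpModel

# Online variational HDP; T and K are the top- and second-level truncation levels.
hdp = HdpModel(corpus, id2word=dictionary, chunksize=256, K=15, T=150)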
'Perform the variational inference e-step for a single document.'
def doc_e_step(self, doc, ss, Elogsticks_1st, word_list, unique_words, doc_word_ids, doc_word_counts, var_converge):
chunkids = [unique_words[id] for id in doc_word_ids] Elogbeta_doc = self.m_Elogbeta[:, doc_word_ids] v = np.zeros((2, (self.m_K - 1))) v[0] = 1.0 v[1] = self.m_alpha phi = ((np.ones((len(doc_word_ids), self.m_K)) * 1.0) / self.m_K) likelihood = 0.0 old_likelihood = (-1e+200) converge = 1.0 eps = 1e-100 iter = 0 max_iter = 100 while ((iter < max_iter) and ((converge < 0.0) or (converge > var_converge))): if (iter < 3): var_phi = np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi) var_phi = np.exp(log_var_phi) else: var_phi = (np.dot(phi.T, (Elogbeta_doc * doc_word_counts).T) + Elogsticks_1st) (log_var_phi, log_norm) = matutils.ret_log_normalize_vec(var_phi) var_phi = np.exp(log_var_phi) if (iter < 3): phi = np.dot(var_phi, Elogbeta_doc).T (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi) phi = np.exp(log_phi) else: phi = (np.dot(var_phi, Elogbeta_doc).T + Elogsticks_2nd) (log_phi, log_norm) = matutils.ret_log_normalize_vec(phi) phi = np.exp(log_phi) phi_all = (phi * np.array(doc_word_counts)[:, np.newaxis]) v[0] = (1.0 + np.sum(phi_all[:, :(self.m_K - 1)], 0)) phi_cum = np.flipud(np.sum(phi_all[:, 1:], 0)) v[1] = (self.m_alpha + np.flipud(np.cumsum(phi_cum))) Elogsticks_2nd = expect_log_sticks(v) likelihood = 0.0 likelihood += np.sum(((Elogsticks_1st - log_var_phi) * var_phi)) log_alpha = np.log(self.m_alpha) likelihood += ((self.m_K - 1) * log_alpha) dig_sum = psi(np.sum(v, 0)) likelihood += np.sum(((np.array([1.0, self.m_alpha])[:, np.newaxis] - v) * (psi(v) - dig_sum))) likelihood -= (np.sum(gammaln(np.sum(v, 0))) - np.sum(gammaln(v))) likelihood += np.sum(((Elogsticks_2nd - log_phi) * phi)) likelihood += np.sum((phi.T * np.dot(var_phi, (Elogbeta_doc * doc_word_counts)))) converge = ((likelihood - old_likelihood) / abs(old_likelihood)) old_likelihood = likelihood if (converge < (-1e-06)): logger.warning('likelihood is decreasing!') iter += 1 ss.m_var_sticks_ss += np.sum(var_phi, 0) ss.m_var_beta_ss[:, chunkids] += np.dot(var_phi.T, (phi.T * doc_word_counts)) return likelihood
'Reorder the topics by decreasing significance (the sum of each topic\'s lambda values).'
def optimal_ordering(self):
idx = matutils.argsort(self.m_lambda_sum, reverse=True) self.m_varphi_ss = self.m_varphi_ss[idx] self.m_lambda = self.m_lambda[idx, :] self.m_lambda_sum = self.m_lambda_sum[idx] self.m_Elogbeta = self.m_Elogbeta[idx, :]
'Since we\'re doing lazy updates on lambda, at any given moment the current state of lambda may not be accurate. This function updates all of the elements of lambda and Elogbeta so that if (for example) we want to print out the topics we\'ve learned we\'ll get the correct behavior.'
def update_expectations(self):
for w in xrange(self.m_W): self.m_lambda[:, w] *= np.exp((self.m_r[(-1)] - self.m_r[self.m_timestamp[w]])) self.m_Elogbeta = (psi((self.m_eta + self.m_lambda)) - psi(((self.m_W * self.m_eta) + self.m_lambda_sum[:, np.newaxis]))) self.m_timestamp[:] = self.m_updatect self.m_status_up_to_date = True
'Print the `topn` most probable words for topic `topic_id` (`num_words` is a deprecated alias for `topn`). Set `formatted=True` to return the topic as a formatted string, or `False` as a list of (weight, word) pairs.'
def show_topic(self, topic_id, topn=20, log=False, formatted=False, num_words=None):
if (num_words is not None): logger.warning('The parameter num_words for show_topic() is deprecated and will be removed in a future version.') logger.warning('Please use topn instead.') topn = num_words if (not self.m_status_up_to_date): self.update_expectations() betas = (self.m_lambda + self.m_eta) hdp_formatter = HdpTopicFormatter(self.id2word, betas) return hdp_formatter.show_topic(topic_id, topn, log, formatted)
'Print the `num_words` most probable words for `num_topics` number of topics. Set `num_topics=-1` to print all topics. Set `formatted=True` to return the topics as a list of strings, or `False` as lists of (weight, word) pairs.'
def show_topics(self, num_topics=20, num_words=20, log=False, formatted=True):
if (not self.m_status_up_to_date): self.update_expectations() betas = (self.m_lambda + self.m_eta) hdp_formatter = HdpTopicFormatter(self.id2word, betas) return hdp_formatter.show_topics(num_topics, num_words, log, formatted)
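Inspecting the learned topics might look like the sketch below, continuing with the hypothetical `hdp` model; the exact shape of each returned item depends on the `formatted` flag, so the loop simply prints whatever the formatter yields.

# Show the 5 most significant topics with 8 words each.
for topic in hdp.show_topics(num_topics=5, num_words=8, formatted=True):
    print(topic)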
'legacy method; use `self.save()` instead'
def save_topics(self, doc_count=None):
if (not self.outputdir): logger.error('cannot store topics without having specified an output directory') return if (doc_count is None): fname = 'final' else: fname = ('doc-%i' % doc_count) fname = ('%s/%s.topics' % (self.outputdir, fname)) logger.info(('saving topics to %s' % fname)) betas = (self.m_lambda + self.m_eta) np.savetxt(fname, betas)
'legacy method; use `self.save()` instead'
def save_options(self):
if (not self.outputdir): logger.error('cannot store options without having specified an output directory') return fname = ('%s/options.dat' % self.outputdir) with utils.smart_open(fname, 'wb') as fout: fout.write(('tau: %s\n' % str((self.m_tau - 1)))) fout.write(('chunksize: %s\n' % str(self.chunksize))) fout.write(('var_converge: %s\n' % str(self.m_var_converge))) fout.write(('D: %s\n' % str(self.m_D))) fout.write(('K: %s\n' % str(self.m_K))) fout.write(('T: %s\n' % str(self.m_T))) fout.write(('W: %s\n' % str(self.m_W))) fout.write(('alpha: %s\n' % str(self.m_alpha))) fout.write(('kappa: %s\n' % str(self.m_kappa))) fout.write(('eta: %s\n' % str(self.m_eta))) fout.write(('gamma: %s\n' % str(self.m_gamma)))
'Compute the LDA model parameters (alpha, beta) that are approximately equivalent to this HDP model.'
def hdp_to_lda(self):
sticks = (self.m_var_sticks[0] / (self.m_var_sticks[0] + self.m_var_sticks[1])) alpha = np.zeros(self.m_T) left = 1.0 for i in xrange(0, (self.m_T - 1)): alpha[i] = (sticks[i] * left) left = (left - alpha[i]) alpha[(self.m_T - 1)] = left alpha = (alpha * self.m_alpha) beta = ((self.m_lambda + self.m_eta) / ((self.m_W * self.m_eta) + self.m_lambda_sum[:, np.newaxis])) return (alpha, beta)
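The code above first converts the top-level stick proportions into topic weights (each weight is its stick fraction times the mass left over from the previous sticks, scaled by `m_alpha`) and then row-normalises the smoothed lambda matrix. A usage sketch, reusing the hypothetical `hdp` model from above:

alpha, beta = hdp.hdp_to_lda()
print(alpha.shape)           # (T,), i.e. (150,) with the defaults
print(beta.shape)            # (T, vocabulary size)
print(beta.sum(axis=1)[:3])  # each row of beta is a normalised topic-word distribution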
'Return the closest corresponding LdaModel for the current HDP model. Whereas hdp_to_lda() only returns the equivalent alpha and beta values, this method returns a trained LdaModel. num_topics is set to m_T (default 150) so as to preserve the matrix shapes when we assign alpha and beta.'
def suggested_lda_model(self):
(alpha, beta) = self.hdp_to_lda() ldam = ldamodel.LdaModel(num_topics=self.m_T, alpha=alpha, id2word=self.id2word, random_state=self.random_state) ldam.expElogbeta[:] = beta return ldam
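Continuing the sketch, the suggested LdaModel can be queried like any other gensim topic model; the document below and its inferred mixture are illustrative.

lda = hdp.suggested_lda_model()
doc_bow = dictionary.doc2bow(["graph", "minors", "survey"])
print(lda[doc_bow])   # e.g. [(topic_id, probability), ...]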
'Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.'
@Pyro4.expose @Pyro4.oneway def requestjob(self):
if (self.model is None): raise RuntimeError('worker must be initialized before receiving jobs') job = None while ((job is None) and (not self.finished)): try: job = self.dispatcher.getjob(self.myid) except Queue.Empty: continue if (job is not None): logger.info(('worker #%s received job #%i' % (self.myid, self.jobsdone))) self.processjob(job) self.dispatcher.jobdone(self.myid) else: logger.info(('worker #%i stopping asking for jobs' % self.myid))
'Store the input-hidden weight matrix in the same format used by the original C word2vec-tool, for compatibility. `fname` is the file to which the vectors are saved. `fvocab` is an optional file used to save the vocabulary. `binary` is an optional boolean indicating whether the data is to be saved in binary word2vec format (default: False). `total_vec` is an optional parameter to explicitly specify the total number of vectors (in case word vectors are appended with document vectors afterwards).'
def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
if (total_vec is None): total_vec = len(self.vocab) vector_size = self.syn0.shape[1] if (fvocab is not None): logger.info(('storing vocabulary in %s' % fvocab)) with utils.smart_open(fvocab, 'wb') as vout: for (word, vocab) in sorted(iteritems(self.vocab), key=(lambda item: (- item[1].count))): vout.write(utils.to_utf8(('%s %s\n' % (word, vocab.count)))) logger.info(('storing %sx%s projection weights into %s' % (total_vec, vector_size, fname))) assert ((len(self.vocab), vector_size) == self.syn0.shape) with utils.smart_open(fname, 'wb') as fout: fout.write(utils.to_utf8(('%s %s\n' % (total_vec, vector_size)))) for (word, vocab) in sorted(iteritems(self.vocab), key=(lambda item: (- item[1].count))): row = self.syn0[vocab.index] if binary: fout.write(((utils.to_utf8(word) + ' ') + row.tostring())) else: fout.write(utils.to_utf8(('%s %s\n' % (word, ' '.join((('%f' % val) for val in row))))))
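A self-contained sketch of exporting trained vectors in the C word2vec format; the sentences, file paths and parameter values are all made up for illustration.

import os
import tempfile

from gensim.models import Word2Vec

sentences = [
    ["human", "interface", "computer"],
    ["survey", "user", "computer", "system"],
    ["graph", "minors", "trees", "graph"],
]
w2v = Word2Vec(sentences, size=50, min_count=1, seed=1)

out_dir = tempfile.gettempdir()
bin_path = os.path.join(out_dir, "vectors.bin")
txt_path = os.path.join(out_dir, "vectors.txt")
vocab_path = os.path.join(out_dir, "vocab.txt")

w2v.wv.save_word2vec_format(bin_path, binary=True)        # binary format
w2v.wv.save_word2vec_format(txt_path, fvocab=vocab_path)  # text format plus word counts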
'Load the input-hidden weight matrix from the original C word2vec-tool format. Note that the information stored in the file is incomplete (the binary tree is missing), so while you can query for word similarity etc., you cannot continue training with a model loaded this way. `binary` is a boolean indicating whether the data is in binary word2vec format. Word counts are read from `fvocab` filename, if set (this is the file generated by `-save-vocab` flag of the original C tool). If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`. `unicode_errors`, default \'strict\', is a string suitable to be passed as the `errors` argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source file may include word tokens truncated in the middle of a multibyte unicode character (as is common from the original word2vec.c tool), \'ignore\' or \'replace\' may help. `limit` sets a maximum number of word-vectors to read from the file. The default, None, means read all. `datatype` (experimental) can coerce dimensions to a non-default float type (such as np.float16) to save memory. (Such types may result in much slower bulk operations or incompatibility with optimized routines.)'
@classmethod def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict', limit=None, datatype=REAL):
counts = None if (fvocab is not None): logger.info('loading word counts from %s', fvocab) counts = {} with utils.smart_open(fvocab) as fin: for line in fin: (word, count) = utils.to_unicode(line).strip().split() counts[word] = int(count) logger.info('loading projection weights from %s', fname) with utils.smart_open(fname) as fin: header = utils.to_unicode(fin.readline(), encoding=encoding) (vocab_size, vector_size) = map(int, header.split()) if limit: vocab_size = min(vocab_size, limit) result = cls() result.vector_size = vector_size result.syn0 = zeros((vocab_size, vector_size), dtype=datatype) def add_word(word, weights): word_id = len(result.vocab) if (word in result.vocab): logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname) return if (counts is None): result.vocab[word] = Vocab(index=word_id, count=(vocab_size - word_id)) elif (word in counts): result.vocab[word] = Vocab(index=word_id, count=counts[word]) else: logger.warning("vocabulary file is incomplete: '%s' is missing", word) result.vocab[word] = Vocab(index=word_id, count=None) result.syn0[word_id] = weights result.index2word.append(word) if binary: binary_len = (dtype(REAL).itemsize * vector_size) for line_no in xrange(vocab_size): word = [] while True: ch = fin.read(1) if (ch == ' '): break if (ch == ''): raise EOFError('unexpected end of input; is count incorrect or file otherwise damaged?') if (ch != '\n'): word.append(ch) word = utils.to_unicode(''.join(word), encoding=encoding, errors=unicode_errors) weights = fromstring(fin.read(binary_len), dtype=REAL) add_word(word, weights) else: for line_no in xrange(vocab_size): line = fin.readline() if (line == ''): raise EOFError('unexpected end of input; is count incorrect or file otherwise damaged?') parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(' ') if (len(parts) != (vector_size + 1)): raise ValueError(('invalid vector on line %s (is this really the text format?)' % line_no)) (word, weights) = (parts[0], list(map(REAL, parts[1:]))) add_word(word, weights) if (result.syn0.shape[0] != len(result.vocab)): logger.info('duplicate words detected, shrinking matrix size from %i to %i', result.syn0.shape[0], len(result.vocab)) result.syn0 = ascontiguousarray(result.syn0[:len(result.vocab)]) assert ((len(result.vocab), vector_size) == result.syn0.shape) logger.info(('loaded %s matrix from %s' % (result.syn0.shape, fname))) return result
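Loading those files back (reusing the hypothetical paths from the export sketch above); a model loaded this way supports lookup and similarity queries but cannot be trained further, as the docstring notes.

from gensim.models import KeyedVectors

kv = KeyedVectors.load_word2vec_format(bin_path, binary=True)
small = KeyedVectors.load_word2vec_format(txt_path, limit=2)  # read only the first 2 vectors
print(kv["computer"].shape)  # (50,)
print(len(small.vocab))      # 2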
'Accept a single word as input. Returns the word\'s representations in vector space, as a 1D numpy array. If `use_norm` is True, returns the normalized word vector. Example:: >>> trained_model[\'office\'] array([ -1.40128313e-02, ...])'
def word_vec(self, word, use_norm=False):
if (word in self.vocab): if use_norm: return self.syn0norm[self.vocab[word].index] else: return self.syn0[self.vocab[word].index] else: raise KeyError(("word '%s' not in vocabulary" % word))
'Find the top-N most similar words. Positive words contribute positively towards the similarity, negative words negatively. This method computes cosine similarity between a simple mean of the projection weight vectors of the given words and the vectors for each word in the model. The method corresponds to the `word-analogy` and `distance` scripts in the original word2vec implementation. If topn is False, most_similar returns the vector of similarity scores. `restrict_vocab` is an optional integer which limits the range of vectors which are searched for most-similar values. For example, restrict_vocab=10000 would only check the first 10000 word vectors in the vocabulary order. (This may be meaningful if you\'ve sorted the vocabulary by descending frequency.) Example:: >>> trained_model.most_similar(positive=[\'woman\', \'king\'], negative=[\'man\']) [(\'queen\', 0.50882536), ...]'
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
self.init_sims() if (isinstance(positive, string_types) and (not negative)): positive = [positive] positive = [((word, 1.0) if isinstance(word, (string_types + (ndarray,))) else word) for word in positive] negative = [((word, (-1.0)) if isinstance(word, (string_types + (ndarray,))) else word) for word in negative] (all_words, mean) = (set(), []) for (word, weight) in (positive + negative): if isinstance(word, ndarray): mean.append((weight * word)) else: mean.append((weight * self.word_vec(word, use_norm=True))) if (word in self.vocab): all_words.add(self.vocab[word].index) if (not mean): raise ValueError('cannot compute similarity with no input') mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL) if (indexer is not None): return indexer.most_similar(mean, topn) limited = (self.syn0norm if (restrict_vocab is None) else self.syn0norm[:restrict_vocab]) dists = dot(limited, mean) if (not topn): return dists best = matutils.argsort(dists, topn=(topn + len(all_words)), reverse=True) result = [(self.index2word[sim], float(dists[sim])) for sim in best if (sim not in all_words)] return result[:topn]
'Compute the Word Mover\'s Distance between two documents. When using this code, please consider citing the following papers: .. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching". .. Ofir Pele and Michael Werman, "Fast and robust earth mover\'s distances". .. Matt Kusner et al. "From Word Embeddings To Document Distances". Note that if one of the documents has no words that exist in the Word2Vec vocab, `float(\'inf\')` (i.e. infinity) will be returned. This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler). Example: >>> # Train word2vec model. >>> model = Word2Vec(sentences) >>> # Some sentences to test. >>> sentence_obama = \'Obama speaks to the media in Illinois\'.lower().split() >>> sentence_president = \'The president greets the press in Chicago\'.lower().split() >>> # Remove their stopwords. >>> from nltk.corpus import stopwords >>> stopwords = stopwords.words(\'english\') >>> sentence_obama = [w for w in sentence_obama if w not in stopwords] >>> sentence_president = [w for w in sentence_president if w not in stopwords] >>> # Compute WMD. >>> distance = model.wmdistance(sentence_obama, sentence_president)'
def wmdistance(self, document1, document2):
if (not PYEMD_EXT): raise ImportError('Please install pyemd Python package to compute WMD.') len_pre_oov1 = len(document1) len_pre_oov2 = len(document2) document1 = [token for token in document1 if (token in self)] document2 = [token for token in document2 if (token in self)] diff1 = (len_pre_oov1 - len(document1)) diff2 = (len_pre_oov2 - len(document2)) if ((diff1 > 0) or (diff2 > 0)): logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2) if ((len(document1) == 0) or (len(document2) == 0)): logger.info('At least one of the documents had no words that were in the vocabulary. Aborting (returning inf).') return float('inf') dictionary = Dictionary(documents=[document1, document2]) vocab_len = len(dictionary) if (vocab_len == 1): return 0.0 docset1 = set(document1) docset2 = set(document2) distance_matrix = zeros((vocab_len, vocab_len), dtype=double) for (i, t1) in dictionary.items(): for (j, t2) in dictionary.items(): if ((not (t1 in docset1)) or (not (t2 in docset2))): continue distance_matrix[(i, j)] = sqrt(np_sum(((self[t1] - self[t2]) ** 2))) if (np_sum(distance_matrix) == 0.0): logger.info('The distance matrix is all zeros. Aborting (returning inf).') return float('inf') def nbow(document): d = zeros(vocab_len, dtype=double) nbow = dictionary.doc2bow(document) doc_len = len(document) for (idx, freq) in nbow: d[idx] = (freq / float(doc_len)) return d d1 = nbow(document1) d2 = nbow(document2) return emd(d1, d2, distance_matrix)
'Find the top-N most similar words, using the multiplicative combination objective proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute positively towards the similarity, negative words negatively, but with less susceptibility to one large distance dominating the calculation. In the common analogy-solving case, of two positive and one negative examples, this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg. Additional positive or negative examples contribute to the numerator or denominator, respectively – a potentially sensible but untested extension of the method. (With a single positive example, rankings will be the same as in the default most_similar.) Example:: >>> trained_model.most_similar_cosmul(positive=[\'baghdad\', \'england\'], negative=[\'london\']) [(u\'iraq\', 0.8488819003105164), ...] .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.'
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
self.init_sims() if (isinstance(positive, string_types) and (not negative)): positive = [positive] all_words = set([self.vocab[word].index for word in (positive + negative) if ((not isinstance(word, ndarray)) and (word in self.vocab))]) positive = [(self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word) for word in positive] negative = [(self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word) for word in negative] if (not positive): raise ValueError('cannot compute similarity with no input') pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive] neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative] dists = (prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 1e-06)) if (not topn): return dists best = matutils.argsort(dists, topn=(topn + len(all_words)), reverse=True) result = [(self.index2word[sim], float(dists[sim])) for sim in best if (sim not in all_words)] return result[:topn]
'Find the top-N most similar words. If topn is False, similar_by_word returns the vector of similarity scores. `restrict_vocab` is an optional integer which limits the range of vectors which are searched for most-similar values. For example, restrict_vocab=10000 would only check the first 10000 word vectors in the vocabulary order. (This may be meaningful if you\'ve sorted the vocabulary by descending frequency.) Example:: >>> trained_model.similar_by_word(\'graph\') [(\'user\', 0.9999163150787354), ...]'
def similar_by_word(self, word, topn=10, restrict_vocab=None):
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
'Find the top-N most similar words by vector. If topn is False, similar_by_vector returns the vector of similarity scores. `restrict_vocab` is an optional integer which limits the range of vectors which are searched for most-similar values. For example, restrict_vocab=10000 would only check the first 10000 word vectors in the vocabulary order. (This may be meaningful if you\'ve sorted the vocabulary by descending frequency.) Example:: >>> trained_model.similar_by_vector([1,2]) [(\'survey\', 0.9942699074745178), ...]'
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
'Which word from the given list doesn\'t go with the others? Example:: >>> trained_model.doesnt_match("breakfast cereal dinner lunch".split()) \'cereal\''
def doesnt_match(self, words):
self.init_sims() used_words = [word for word in words if (word in self)] if (len(used_words) != len(words)): ignored_words = (set(words) - set(used_words)) logger.warning('vectors for words %s are not present in the model, ignoring these words', ignored_words) if (not used_words): raise ValueError('cannot select a word from an empty list') vectors = vstack((self.word_vec(word, use_norm=True) for word in used_words)).astype(REAL) mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL) dists = dot(vectors, mean) return sorted(zip(dists, used_words))[0][1]
'Accept a single word or a list of words as input. If a single word: returns the word\'s representations in vector space, as a 1D numpy array. Multiple words: return the words\' representations in vector space, as a 2d numpy array: #words x #vector_size. Matrix rows are in the same order as in input. Example:: >>> trained_model[\'office\'] array([ -1.40128313e-02, ...]) >>> trained_model[[\'office\', \'products\']] array([ -1.40128313e-02, ...] [ -1.70425311e-03, ...]'
def __getitem__(self, words):
if isinstance(words, string_types): return self.word_vec(words) return vstack([self.word_vec(word) for word in words])
'Compute cosine similarity between two words. Example:: >>> trained_model.similarity(\'woman\', \'man\') 0.73723527 >>> trained_model.similarity(\'woman\', \'woman\') 1.0'
def similarity(self, w1, w2):
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
'Compute cosine similarity between two sets of words. Example:: >>> trained_model.n_similarity([\'sushi\', \'shop\'], [\'japanese\', \'restaurant\']) 0.61540466561049689 >>> trained_model.n_similarity([\'restaurant\', \'japanese\'], [\'japanese\', \'restaurant\']) 1.0000000000000004 >>> trained_model.n_similarity([\'sushi\'], [\'restaurant\']) == trained_model.similarity(\'sushi\', \'restaurant\') True'
def n_similarity(self, ws1, ws2):
if (not (len(ws1) and len(ws2))): raise ZeroDivisionError('At least one of the passed lists is empty.') v1 = [self[word] for word in ws1] v2 = [self[word] for word in ws2] return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
'Compute accuracy of the model. `questions` is a filename where lines are 4-tuples of words, split into sections by ": SECTION NAME" lines. See questions-words.txt in https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip for an example. The accuracy is reported (=printed to log and returned as a list) for each section separately, plus there\'s one aggregate summary at the end. Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab` words (default 30,000). This may be meaningful if you\'ve sorted the vocabulary by descending frequency. In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then case normalization is performed. Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens and question words. In case of multiple case variants of a single word, the vector for the first occurrence (also the most frequent if vocabulary is sorted) is taken. This method corresponds to the `compute-accuracy` script of the original C word2vec.'
def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]] ok_vocab = (dict(((w.upper(), v) for (w, v) in reversed(ok_vocab))) if case_insensitive else dict(ok_vocab)) (sections, section) = ([], None) for (line_no, line) in enumerate(utils.smart_open(questions)): line = utils.to_unicode(line) if line.startswith(': '): if section: sections.append(section) self.log_accuracy(section) section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []} else: if (not section): raise ValueError(('missing section header before line #%i in %s' % (line_no, questions))) try: if case_insensitive: (a, b, c, expected) = [word.upper() for word in line.split()] else: (a, b, c, expected) = [word for word in line.split()] except: logger.info(('skipping invalid line #%i in %s' % (line_no, questions))) continue if ((a not in ok_vocab) or (b not in ok_vocab) or (c not in ok_vocab) or (expected not in ok_vocab)): logger.debug(('skipping line #%i with OOV words: %s' % (line_no, line.strip()))) continue original_vocab = self.vocab self.vocab = ok_vocab ignore = set([a, b, c]) predicted = None sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab) self.vocab = original_vocab for index in matutils.argsort(sims, reverse=True): predicted = (self.index2word[index].upper() if case_insensitive else self.index2word[index]) if ((predicted in ok_vocab) and (predicted not in ignore)): if (predicted != expected): logger.debug('%s: expected %s, predicted %s', line.strip(), expected, predicted) break if (predicted == expected): section['correct'].append((a, b, c, expected)) else: section['incorrect'].append((a, b, c, expected)) if section: sections.append(section) self.log_accuracy(section) total = {'section': 'total', 'correct': sum((s['correct'] for s in sections), []), 'incorrect': sum((s['incorrect'] for s in sections), [])} self.log_accuracy(total) sections.append(total) return sections
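A sketch of running the analogy benchmark; `questions-words.txt` is the standard analogy file referenced in the docstring and its path here is hypothetical. This assumes `kv` holds vectors trained on a realistically sized corpus (with the toy vectors above, every question would be skipped as out-of-vocabulary).

sections = kv.accuracy("questions-words.txt", restrict_vocab=30000, case_insensitive=True)
total = sections[-1]   # the aggregate 'total' summary is appended last
n_correct = len(total["correct"])
n_all = n_correct + len(total["incorrect"])
print("analogy accuracy: %.1f%%" % (100.0 * n_correct / max(1, n_all)))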