desc (string, lengths 3–26.7k) | decl (string, lengths 11–7.89k) | bodies (string, lengths 8–553k) |
---|---|---|
'Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.
The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
between the similarities from the dataset and the similarities produced by the model itself.
The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).
Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
words (default 300,000). This may be meaningful if you\'ve sorted the vocabulary by descending frequency.
If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
is performed.
Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
evaluating the model (default True). Useful when you expect case-mismatch between training tokens
and word pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
Otherwise (default False), these pairs are skipped entirely.'
| def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
| ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = (dict(((w.upper(), v) for (w, v) in reversed(ok_vocab))) if case_insensitive else dict(ok_vocab))
similarity_gold = []
similarity_model = []
oov = 0
original_vocab = self.vocab
self.vocab = ok_vocab
for (line_no, line) in enumerate(utils.smart_open(pairs)):
line = utils.to_unicode(line)
if line.startswith('#'):
continue
else:
try:
if case_insensitive:
(a, b, sim) = [word.upper() for word in line.split(delimiter)]
else:
(a, b, sim) = [word for word in line.split(delimiter)]
sim = float(sim)
except:
logger.info('skipping invalid line #%d in %s', line_no, pairs)
continue
if ((a not in ok_vocab) or (b not in ok_vocab)):
oov += 1
if dummy4unknown:
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim)
similarity_model.append(self.similarity(a, b))
self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
oov_ratio = ((float(oov) / (len(similarity_gold) + oov)) * 100)
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug('Spearman rank-order correlation coefficient against %s: %f with p-value %f', pairs, spearman[0], spearman[1])
logger.debug(('Pairs with unknown words: %d' % oov))
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return (pearson, spearman, oov_ratio)
|
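For reference, a minimal usage sketch (hedged: assumes `wv` is an instance of this vectors class, e.g. a trained model's word vectors, and that a `wordsim353.tsv` file is available locally; the path here is hypothetical):
>>> pearson, spearman, oov_ratio = wv.evaluate_word_pairs('wordsim353.tsv')
>>> print('Pearson r = %.3f (p = %.3g)' % pearson)    # pearson is a (coefficient, p-value) tuple
>>> print('Spearman rho = %.3f' % spearman[0])        # spearmanr result; index 0 is the coefficient
>>> print('OOV pairs: %.1f%%' % oov_ratio)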
'Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.'
| def init_sims(self, replace=False):
| if ((getattr(self, 'syn0norm', None) is None) or replace):
logger.info('precomputing L2-norms of word weight vectors')
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum((-1)))
self.syn0norm = self.syn0
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum((-1)))[..., newaxis]).astype(REAL)
|
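A short sketch of the trade-off described above (assuming `wv` holds the trained vectors):
>>> wv.init_sims(replace=True)        # L2-normalize in place; the original vectors are discarded
>>> wv.most_similar('cat', topn=5)    # similarity queries still work...
>>> # ...but further training is no longer possible, since the raw weights are gone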
'Return a Keras \'Embedding\' layer with weights set as the Word2Vec model\'s learned word embeddings'
| def get_embedding_layer(self, train_embeddings=False):
| if (not KERAS_INSTALLED):
raise ImportError('Please install Keras to use this function')
weights = self.syn0
layer = Embedding(input_dim=weights.shape[0], output_dim=weights.shape[1], weights=[weights], trainable=train_embeddings)
return layer
|
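A hedged sketch of wiring the returned layer into a Keras model (assumes Keras is installed and `wv` holds the trained vectors; the variable names are illustrative only):
>>> from keras.layers import Input
>>> from keras.models import Model
>>> emb = wv.get_embedding_layer()            # frozen by default (train_embeddings=False)
>>> word_ids = Input(shape=(None,), dtype='int32')
>>> vectors = emb(word_ids)                   # looks up the Word2Vec weights for each word index
>>> lookup_model = Model(word_ids, vectors)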
'Compute the \'l1\' or \'l2\' normalization by normalizing separately
for each doc in a corpus.
Formula for the \'l1\' norm of term \'i\' in document \'j\' in a corpus of \'D\' documents, where w_{i, j} is the weight of term \'i\' in document \'j\'::
norml1_{i, j} = w_{i, j} / sum(absolute(values in j))
Formula for the \'l2\' norm::
norml2_{i, j} = w_{i, j} / sqrt(sum(square(values in j)))'
| def __init__(self, corpus=None, norm='l2'):
| self.norm = norm
if (corpus is not None):
self.calc_norm(corpus)
|
'Calculates the norm by calling matutils.unitvec with the norm parameter.'
| def calc_norm(self, corpus):
| logger.info(('Performing %s normalization...' % self.norm))
norms = []
numnnz = 0
docno = 0
for bow in corpus:
docno += 1
numnnz += len(bow)
norms.append(matutils.unitvec(bow, self.norm))
self.num_docs = docno
self.num_nnz = numnnz
self.norms = norms
|
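To make the `l1`/`l2` formulas above concrete, here is a small sketch using `matutils.unitvec` directly, the same call `calc_norm` makes (the values in the comments are worked by hand):
>>> from gensim import matutils
>>> bow = [(0, 3.0), (2, 4.0)]        # term 0 has weight 3, term 2 has weight 4
>>> matutils.unitvec(bow, 'l2')       # -> weights 0.6 and 0.8 (divide by sqrt(3**2 + 4**2) = 5)
>>> matutils.unitvec(bow, 'l1')       # -> weights 3/7 and 4/7 (divide by |3| + |4| = 7)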
'Prepare the state for a new EM iteration (reset sufficient stats).'
| def reset(self):
| self.sstats[:] = 0.0
self.numdocs = 0
|
'Merge the result of an E step from one node with that of another node
(summing up sufficient statistics).
The merging is trivial and after merging all cluster nodes, we have the
exact same result as if the computation was run on a single node (no
approximation).'
| def merge(self, other):
| assert (other is not None)
self.sstats += other.sstats
self.numdocs += other.numdocs
|
'Given LdaState `other`, merge it with the current state. Stretch both to
`targetsize` documents before merging, so that they are of comparable
magnitude.
Merging is done by average weighting: in the extremes, `rhot=0.0` means
`other` is completely ignored; `rhot=1.0` means `self` is completely ignored.
This procedure corresponds to the stochastic gradient update from Hoffman
et al., algorithm 2 (eq. 14).'
| def blend(self, rhot, other, targetsize=None):
| assert (other is not None)
if (targetsize is None):
targetsize = self.numdocs
if ((self.numdocs == 0) or (targetsize == self.numdocs)):
scale = 1.0
else:
scale = ((1.0 * targetsize) / self.numdocs)
self.sstats *= ((1.0 - rhot) * scale)
if ((other.numdocs == 0) or (targetsize == other.numdocs)):
scale = 1.0
else:
logger.info('merging changes from %i documents into a model of %i documents', other.numdocs, targetsize)
scale = ((1.0 * targetsize) / other.numdocs)
self.sstats += ((rhot * scale) * other.sstats)
self.numdocs = targetsize
|
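A tiny numeric sketch of the interpolation performed by `blend` (hypothetical numbers; assumes both states already cover `targetsize` documents, so both scale factors are 1.0):
>>> import numpy as np
>>> rhot = 0.1
>>> self_sstats = np.array([10.0, 20.0])     # existing sufficient statistics
>>> other_sstats = np.array([30.0, 40.0])    # statistics collected in the latest E step
>>> blended = (1.0 - rhot) * self_sstats + rhot * other_sstats   # -> [12., 22.]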
'Alternative, simpler blend.'
| def blend2(self, rhot, other, targetsize=None):
| assert (other is not None)
if (targetsize is None):
targetsize = self.numdocs
self.sstats += other.sstats
self.numdocs = targetsize
|
'If given, start training from the iterable `corpus` straight away. If not given,
the model is left untrained (presumably because you want to call `update()` manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`alpha` and `eta` are hyperparameters that affect sparsity of the document-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
supports special values of \'asymmetric\' and \'auto\': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a vector of shape num_words, which can be used to
impose (user defined) asymmetric priors over the word distribution.
It also supports the special value \'auto\', which learns an asymmetric
prior over words directly from your data. `eta` can also be a matrix
of shape num_topics x num_words, which can be used to impose
asymmetric priors over the word distribution on a per-topic basis
(can not be learned from data).
Turn on `distributed` to force distributed computing (see the `web tutorial <http://radimrehurek.com/gensim/distributed.html>`_
on how to set up a cluster of machines for gensim).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates (setting this to 1 slows down training ~2x;
default is 10 for better performance). Set to None to disable perplexity estimation.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively.
`minimum_probability` controls filtering the topics returned for a document (bow).
`random_state` can be a np.random.RandomState object or the seed for one
Example:
>>> lda = LdaModel(corpus, num_topics=100) # train model
>>> print(lda[doc_bow]) # get topic probability distribution for a document
>>> lda.update(corpus2) # update the LDA model with additional documents
>>> print(lda[doc_bow])
>>> lda = LdaModel(corpus, num_topics=50, alpha=\'auto\', eval_every=5) # train asymmetric alpha from data'
| def __init__(self, corpus=None, num_topics=100, id2word=None, distributed=False, chunksize=2000, passes=1, update_every=1, alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001, minimum_probability=0.01, random_state=None, ns_conf={}, minimum_phi_value=0.01, per_word_topics=False):
| self.id2word = id2word
if ((corpus is None) and (self.id2word is None)):
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if (self.id2word is None):
logger.warning('no word id mapping provided; initializing from corpus, assuming identity')
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif (len(self.id2word) > 0):
self.num_terms = (1 + max(self.id2word.keys()))
else:
self.num_terms = 0
if (self.num_terms == 0):
raise ValueError('cannot compute LDA over an empty collection (no terms)')
self.distributed = bool(distributed)
self.num_topics = int(num_topics)
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.minimum_phi_value = minimum_phi_value
self.per_word_topics = per_word_topics
(self.alpha, self.optimize_alpha) = self.init_dir_prior(alpha, 'alpha')
assert (self.alpha.shape == (self.num_topics,)), ('Invalid alpha shape. Got shape %s, but expected (%d, )' % (str(self.alpha.shape), self.num_topics))
if isinstance(eta, six.string_types):
if (eta == 'asymmetric'):
raise ValueError("The 'asymmetric' option cannot be used for eta")
(self.eta, self.optimize_eta) = self.init_dir_prior(eta, 'eta')
self.random_state = utils.get_random_state(random_state)
assert ((self.eta.shape == (self.num_terms,)) or (self.eta.shape == (self.num_topics, self.num_terms))), ('Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)' % (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
self.iterations = iterations
self.gamma_threshold = gamma_threshold
if (not distributed):
logger.info('using serial LDA version on this node')
self.dispatcher = None
self.numworkers = 1
else:
if self.optimize_alpha:
raise NotImplementedError('auto-optimizing alpha not implemented in distributed LDA')
try:
import Pyro4
with utils.getNS(**ns_conf) as ns:
from gensim.models.lda_dispatcher import LDA_DISPATCHER_PREFIX
self.dispatcher = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
logger.debug(('looking for dispatcher at %s' % str(self.dispatcher._pyroUri)))
self.dispatcher.initialize(id2word=self.id2word, num_topics=self.num_topics, chunksize=chunksize, alpha=alpha, eta=eta, distributed=False)
self.numworkers = len(self.dispatcher.getworkers())
logger.info(('using distributed version with %i workers' % self.numworkers))
except Exception as err:
logger.error('failed to initialize distributed LDA (%s)', err)
raise RuntimeError(('failed to initialize distributed LDA (%s)' % err))
self.state = LdaState(self.eta, (self.num_topics, self.num_terms))
self.state.sstats = self.random_state.gamma(100.0, (1.0 / 100.0), (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
if (corpus is not None):
use_numpy = (self.dispatcher is not None)
self.update(corpus, chunks_as_numpy=use_numpy)
|
'Clear model state (free up some memory). Used in the distributed algo.'
| def clear(self):
| self.state = None
self.Elogbeta = None
|
'Given a chunk of sparse document vectors, estimate gamma (parameters
controlling the topic weights) for each document in the chunk.
This function does not modify the model (=is read-only aka const). The
whole input chunk of documents is assumed to fit in RAM; chunking of a
large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model\'s topic-word distributions, and return a 2-tuple
`(gamma, sstats)`. Otherwise, return `(gamma, None)`. `gamma` is of shape
`len(chunk) x self.num_topics`.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.'
| def inference(self, chunk, collect_sstats=False):
| try:
_ = len(chunk)
except:
chunk = list(chunk)
if (len(chunk) > 1):
logger.debug('performing inference on a chunk of %i documents', len(chunk))
gamma = self.random_state.gamma(100.0, (1.0 / 100.0), (len(chunk), self.num_topics))
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
for (d, doc) in enumerate(chunk):
if ((len(doc) > 0) and (not isinstance(doc[0][0], (six.integer_types + (np.integer,))))):
ids = [int(id) for (id, _) in doc]
else:
ids = [id for (id, _) in doc]
cts = np.array([cnt for (_, cnt) in doc])
gammad = gamma[d, :]
Elogthetad = Elogtheta[d, :]
expElogthetad = expElogtheta[d, :]
expElogbetad = self.expElogbeta[:, ids]
phinorm = (np.dot(expElogthetad, expElogbetad) + 1e-100)
for _ in xrange(self.iterations):
lastgamma = gammad
gammad = (self.alpha + (expElogthetad * np.dot((cts / phinorm), expElogbetad.T)))
Elogthetad = dirichlet_expectation(gammad)
expElogthetad = np.exp(Elogthetad)
phinorm = (np.dot(expElogthetad, expElogbetad) + 1e-100)
meanchange = np.mean(abs((gammad - lastgamma)))
if (meanchange < self.gamma_threshold):
converged += 1
break
gamma[d, :] = gammad
if collect_sstats:
sstats[:, ids] += np.outer(expElogthetad.T, (cts / phinorm))
if (len(chunk) > 1):
logger.debug('%i/%i documents converged within %i iterations', converged, len(chunk), self.iterations)
if collect_sstats:
sstats *= self.expElogbeta
return (gamma, sstats)
|
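A brief usage sketch (assumes `lda` is a trained instance of this class and `bow` is a single bag-of-words document):
>>> gamma, _ = lda.inference([bow])            # variational Dirichlet parameters, shape (1, num_topics)
>>> topic_dist = gamma[0] / gamma[0].sum()     # normalize into a probability distribution over topics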
'Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).'
| def do_estep(self, chunk, state=None):
| if (state is None):
state = self.state
(gamma, sstats) = self.inference(chunk, collect_sstats=True)
state.sstats += sstats
state.numdocs += gamma.shape[0]
return gamma
|
'Update parameters for the Dirichlet prior on the per-document
topic weights `alpha` given the last `gammat`.'
| def update_alpha(self, gammat, rho):
| N = float(len(gammat))
logphat = (sum((dirichlet_expectation(gamma) for gamma in gammat)) / N)
self.alpha = update_dir_prior(self.alpha, N, logphat, rho)
logger.info('optimized alpha %s', list(self.alpha))
return self.alpha
|
'Update parameters for the Dirichlet prior on the per-topic
word weights `eta` given the last `lambdat`.'
| def update_eta(self, lambdat, rho):
| N = float(lambdat.shape[0])
logphat = (sum((dirichlet_expectation(lambda_) for lambda_ in lambdat)) / N).reshape((self.num_terms,))
self.eta = update_dir_prior(self.eta, N, logphat, rho)
return self.eta
|
'Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics, incl.
perplexity=2^(-bound), to log at INFO level.'
| def log_perplexity(self, chunk, total_docs=None):
| if (total_docs is None):
total_docs = len(chunk)
corpus_words = sum((cnt for document in chunk for (_, cnt) in document))
subsample_ratio = ((1.0 * total_docs) / len(chunk))
perwordbound = (self.bound(chunk, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words))
logger.info(('%.3f per-word bound, %.1f perplexity estimate based on a held-out corpus of %i documents with %i words' % (perwordbound, np.exp2((- perwordbound)), len(chunk), corpus_words)))
return perwordbound
|
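The logged perplexity is simply `2 ** (-bound)` applied to the returned per-word bound; a quick sketch with a hypothetical value:
>>> import numpy as np
>>> perwordbound = -9.0          # hypothetical return value of log_perplexity()
>>> np.exp2(-perwordbound)       # -> 512.0, the perplexity estimate written to the log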
'Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents).
In distributed mode, the E step is distributed over a cluster of machines.
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
converge for any `decay` in (0.5, 1.0]. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
Args:
corpus (gensim corpus): The corpus with which the LDA model should be updated.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
array or not. np can in some settings turn the term IDs
into floats; these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`LdaModel` constructor.'
| def update(self, corpus, chunksize=None, decay=None, offset=None, passes=None, update_every=None, eval_every=None, iterations=None, gamma_threshold=None, chunks_as_numpy=False):
| if (decay is None):
decay = self.decay
if (offset is None):
offset = self.offset
if (passes is None):
passes = self.passes
if (update_every is None):
update_every = self.update_every
if (eval_every is None):
eval_every = self.eval_every
if (iterations is None):
iterations = self.iterations
if (gamma_threshold is None):
gamma_threshold = self.gamma_threshold
try:
lencorpus = len(corpus)
except:
logger.warning('input corpus stream has no len(); counting documents')
lencorpus = sum((1 for _ in corpus))
if (lencorpus == 0):
logger.warning('LdaModel.update() called with an empty corpus')
return
if (chunksize is None):
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = 'online'
if (passes == 1):
updatetype += ' (single-pass)'
else:
updatetype += ' (multi-pass)'
updateafter = min(lencorpus, ((update_every * self.numworkers) * chunksize))
else:
updatetype = 'batch'
updateafter = lencorpus
evalafter = min(lencorpus, (((eval_every or 0) * self.numworkers) * chunksize))
updates_per_pass = max(1, (lencorpus / updateafter))
logger.info('running %s LDA training, %s topics, %i passes over the supplied corpus of %i documents, updating model once every %i documents, evaluating perplexity every %i documents, iterating %ix with a convergence threshold of %f', updatetype, self.num_topics, passes, lencorpus, updateafter, evalafter, iterations, gamma_threshold)
if ((updates_per_pass * passes) < 10):
logger.warning('too few updates, training might not converge; consider increasing the number of passes or iterations to improve accuracy')
def rho():
return pow(((offset + pass_) + (self.num_updates / chunksize)), (- decay))
for pass_ in xrange(passes):
if self.dispatcher:
logger.info(('initializing %s workers' % self.numworkers))
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
reallen = 0
for (chunk_no, chunk) in enumerate(utils.grouper(corpus, chunksize, as_numpy=chunks_as_numpy)):
reallen += len(chunk)
if (eval_every and ((reallen == lencorpus) or (((chunk_no + 1) % (eval_every * self.numworkers)) == 0))):
self.log_perplexity(chunk, total_docs=lencorpus)
if self.dispatcher:
logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i', pass_, ((chunk_no * chunksize) + len(chunk)), lencorpus)
self.dispatcher.putjob(chunk)
else:
logger.info('PROGRESS: pass %i, at document #%i/%i', pass_, ((chunk_no * chunksize) + len(chunk)), lencorpus)
gammat = self.do_estep(chunk, other)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
if (update_every and (((chunk_no + 1) % (update_every * self.numworkers)) == 0)):
if self.dispatcher:
logger.info('reached the end of input; now waiting for all remaining jobs to finish')
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, (pass_ > 0))
del other
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = LdaState(self.eta, self.state.sstats.shape)
dirty = False
if (reallen != lencorpus):
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
if self.dispatcher:
logger.info('reached the end of input; now waiting for all remaining jobs to finish')
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, (pass_ > 0))
del other
dirty = False
|
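For intuition on `decay` and `offset`, the learning rate `rho()` above follows Hoffman et al.'s schedule; a standalone sketch with hypothetical settings:
>>> decay, offset, chunksize = 0.5, 1.0, 2000
>>> def rho(pass_, num_updates):
...     return pow(offset + pass_ + num_updates / float(chunksize), -decay)
>>> rho(0, 0)         # 1.0   -- the very first update fully trusts the new statistics
>>> rho(0, 20000)     # ~0.30 -- later updates down-weight each new mini-batch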
'M step: use linear interpolation between the existing topics and
collected sufficient statistics in `other` to update the topics.'
| def do_mstep(self, rho, other, extra_pass=False):
| logger.debug('updating topics')
diff = np.log(self.expElogbeta)
self.state.blend(rho, other)
diff -= self.state.get_Elogbeta()
self.sync_state()
self.print_topics(5)
logger.info('topic diff=%f, rho=%f', np.mean(np.abs(diff)), rho)
if self.optimize_eta:
self.update_eta(self.state.get_lambda(), rho)
if (not extra_pass):
self.num_updates += other.numdocs
|
'Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
`gamma` are the variational parameters on topic weights for each `corpus`
document (=2d matrix=what comes out of `inference()`).
If not supplied, will be inferred from the model.'
| def bound(self, corpus, gamma=None, subsample_ratio=1.0):
| score = 0.0
_lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
for (d, doc) in enumerate(corpus):
if ((d % self.chunksize) == 0):
logger.debug('bound: at document #%i', d)
if (gamma is None):
(gammad, _) = self.inference([doc])
else:
gammad = gamma[d]
Elogthetad = dirichlet_expectation(gammad)
score += np.sum(((cnt * logsumexp((Elogthetad + Elogbeta[:, int(id)]))) for (id, cnt) in doc))
score += np.sum(((self.alpha - gammad) * Elogthetad))
score += np.sum((gammaln(gammad) - gammaln(self.alpha)))
score += (gammaln(np.sum(self.alpha)) - gammaln(np.sum(gammad)))
score *= subsample_ratio
score += np.sum(((self.eta - _lambda) * Elogbeta))
score += np.sum((gammaln(_lambda) - gammaln(self.eta)))
if (np.ndim(self.eta) == 0):
sum_eta = (self.eta * self.num_terms)
else:
sum_eta = np.sum(self.eta)
score += np.sum((gammaln(sum_eta) - gammaln(np.sum(_lambda, 1))))
return score
|
'For `num_topics` number of topics, return `num_words` most significant words
(10 words per topic, by default).
The topics are returned as a list -- a list of strings if `formatted` is
True, or a list of `(word, probability)` 2-tuples if False.
If `log` is True, also output this result to log.
Unlike LSA, there is no natural ordering between the topics in LDA.
The returned `num_topics <= self.num_topics` subset of all topics is therefore
arbitrary and may change between two LDA training runs.'
| def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
| if ((num_topics < 0) or (num_topics >= self.num_topics)):
num_topics = self.num_topics
chosen_topics = range(num_topics)
else:
num_topics = min(num_topics, self.num_topics)
sort_alpha = (self.alpha + (0.0001 * self.random_state.rand(len(self.alpha))))
sorted_topics = list(matutils.argsort(sort_alpha))
chosen_topics = (sorted_topics[:(num_topics // 2)] + sorted_topics[((- num_topics) // 2):])
shown = []
topic = self.state.get_lambda()
for i in chosen_topics:
topic_ = topic[i]
topic_ = (topic_ / topic_.sum())
bestn = matutils.argsort(topic_, num_words, reverse=True)
topic_ = [(self.id2word[id], topic_[id]) for id in bestn]
if formatted:
topic_ = ' + '.join([('%.3f*"%s"' % (v, k)) for (k, v) in topic_])
shown.append((i, topic_))
if log:
logger.info('topic #%i (%.3f): %s', i, self.alpha[i], topic_)
return shown
|
'Return a list of `(word, probability)` 2-tuples for the most probable
words in topic `topicid`.
Only return 2-tuples for the topn most probable words (ignore the rest).'
| def show_topic(self, topicid, topn=10):
| return [(self.id2word[id], value) for (id, value) in self.get_topic_terms(topicid, topn)]
|
'Return a list of `(word_id, probability)` 2-tuples for the most
probable words in topic `topicid`.
Only return 2-tuples for the topn most probable words (ignore the rest).'
| def get_topic_terms(self, topicid, topn=10):
| topic = self.state.get_lambda()[topicid]
topic = (topic / topic.sum())
bestn = matutils.argsort(topic, topn, reverse=True)
return [(id, topic[id]) for id in bestn]
|
'Calculate the UMass topic coherence for each topic. Algorithm from
**Mimno, Wallach, Talley, Leenders, McCallum: Optimizing Semantic Coherence in Topic Models, EMNLP 2011.**'
| def top_topics(self, corpus, num_words=20):
| (is_corpus, corpus) = utils.is_corpus(corpus)
if (not is_corpus):
logger.warning('LdaModel.top_topics() called with an empty corpus')
return
topics = []
str_topics = []
for topic in self.state.get_lambda():
topic = (topic / topic.sum())
bestn = matutils.argsort(topic, topn=num_words, reverse=True)
topics.append(bestn)
beststr = [(topic[id], self.id2word[id]) for id in bestn]
str_topics.append(beststr)
top_ids = set(chain.from_iterable(topics))
doc_word_list = {}
for id in top_ids:
id_list = set()
for (n, document) in enumerate(corpus):
if (id in frozenset((x[0] for x in document))):
id_list.add(n)
doc_word_list[id] = id_list
coherence_scores = []
for (t, top_words) in enumerate(topics):
coherence = 0.0
for m in top_words[1:]:
m_docs = doc_word_list[m]
m_index = np.where((top_words == m))[0][0]
for l in top_words[:m_index]:
l_docs = doc_word_list[l]
if (len(l_docs) > 0):
co_doc_frequency = len(m_docs.intersection(l_docs))
coherence += np.log(((co_doc_frequency + 1.0) / len(l_docs)))
coherence_scores.append((str_topics[t], coherence))
top_topics = sorted(coherence_scores, key=(lambda t: t[1]), reverse=True)
return top_topics
|
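Each pairwise term accumulated in the loop above is the UMass score log((D(w_m, w_l) + 1) / D(w_l)), where D counts the documents containing the word (or both words); a sketch with hypothetical counts:
>>> import numpy as np
>>> docs_with_wl = 50            # documents containing the higher-ranked word w_l
>>> docs_with_both = 20          # documents containing both w_m and w_l
>>> np.log((docs_with_both + 1.0) / docs_with_wl)   # one pairwise coherence term, about -0.87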
'Return topic distribution for the given document `bow`, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `minimum_probability`).
If per_word_topics is True, it also returns a list of topics, sorted in descending order of most likely topics for that word.
It also returns a list of word_ids and each word\'s corresponding topics\' phi_values, multiplied by feature length (i.e., word count)'
| def get_document_topics(self, bow, minimum_probability=None, minimum_phi_value=None, per_word_topics=False):
| if (minimum_probability is None):
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-08)
if (minimum_phi_value is None):
minimum_phi_value = self.minimum_probability
minimum_phi_value = max(minimum_phi_value, 1e-08)
(is_corpus, corpus) = utils.is_corpus(bow)
if is_corpus:
kwargs = dict(per_word_topics=per_word_topics, minimum_probability=minimum_probability, minimum_phi_value=minimum_phi_value)
return self._apply(corpus, **kwargs)
(gamma, phis) = self.inference([bow], collect_sstats=per_word_topics)
topic_dist = (gamma[0] / sum(gamma[0]))
document_topics = [(topicid, topicvalue) for (topicid, topicvalue) in enumerate(topic_dist) if (topicvalue >= minimum_probability)]
if (not per_word_topics):
return document_topics
else:
word_topic = []
word_phi = []
for (word_type, weight) in bow:
phi_values = []
phi_topic = []
for topic_id in range(0, self.num_topics):
if (phis[topic_id][word_type] >= minimum_phi_value):
phi_values.append((phis[topic_id][word_type], topic_id))
phi_topic.append((topic_id, phis[topic_id][word_type]))
word_phi.append((word_type, phi_topic))
sorted_phi_values = sorted(phi_values, reverse=True)
topics_sorted = [x[1] for x in sorted_phi_values]
word_topic.append((word_type, topics_sorted))
return (document_topics, word_topic, word_phi)
|
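A usage sketch of the per-word output described above (assumes `lda` is a trained model and `bow` a single bag-of-words document; the values shown in the comments are purely illustrative):
>>> doc_topics, word_topics, word_phis = lda.get_document_topics(bow, per_word_topics=True)
>>> doc_topics      # e.g. [(3, 0.62), (7, 0.31)]                 -- (topic_id, probability) above the threshold
>>> word_topics     # e.g. [(word_id, [3, 7]), ...]               -- topics per word, most likely first
>>> word_phis       # e.g. [(word_id, [(3, 1.8), (7, 0.2)]), ...] -- phi values, scaled by the word count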
'Returns most likely topics for a particular word in vocab.'
| def get_term_topics(self, word_id, minimum_probability=None):
| if (minimum_probability is None):
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-08)
if isinstance(word_id, str):
word_id = self.id2word.doc2bow([word_id])[0][0]
values = []
for topic_id in range(0, self.num_topics):
if (self.expElogbeta[topic_id][word_id] >= minimum_probability):
values.append((topic_id, self.expElogbeta[topic_id][word_id]))
return values
|
'Calculate the topic-to-topic difference between two LDA models.
`other` is an instance of `LdaMulticore` or `LdaModel`.
`distance` is the function that will be applied to calculate the difference between any topic pair.
Available values: `kullback_leibler`, `hellinger` and `jaccard`
`num_words` is the number of most relevant words used if distance == `jaccard` (also used for annotation)
`n_ann_terms` is the maximum number of words in the intersection/symmetric difference between topics (used for annotation)
`diagonal` set to True if the difference is required only between identical topic numbers (returns the diagonal of the diff matrix)
`annotation` whether the intersection or difference of words between two topics should be returned
Returns a matrix Z with shape (m1.num_topics, m2.num_topics), where Z[i][j] is the difference between topic_i and topic_j
and matrix annotation (if True) with shape (m1.num_topics, m2.num_topics, 2, None),
where:
annotation[i][j] = [[`int_1`, `int_2`, ...], [`diff_1`, `diff_2`, ...]] and
`int_k` is word from intersection of `topic_i` and `topic_j` and
`diff_l` is word from symmetric difference of `topic_i` and `topic_j`
`normed` is a flag. If `True`, the matrix Z will be normalized
Example:
>>> m1, m2 = LdaMulticore.load(path_1), LdaMulticore.load(path_2)
>>> mdiff, annotation = m1.diff(m2)
>>> print(mdiff) # get matrix with difference for each topic pair from `m1` and `m2`
>>> print(annotation) # get array with positive/negative words for each topic pair from `m1` and `m2`'
| def diff(self, other, distance='kullback_leibler', num_words=100, n_ann_terms=10, diagonal=False, annotation=True, normed=True):
| distances = {'kullback_leibler': kullback_leibler, 'hellinger': hellinger, 'jaccard': jaccard_distance}
if (distance not in distances):
valid_keys = ', '.join(('`{}`'.format(x) for x in distances.keys()))
raise ValueError('Incorrect distance, valid only {}'.format(valid_keys))
if (not isinstance(other, self.__class__)):
raise ValueError('The parameter `other` must be of type `{}`'.format(self.__class__.__name__))
distance_func = distances[distance]
(d1, d2) = (self.state.get_lambda(), other.state.get_lambda())
(t1_size, t2_size) = (d1.shape[0], d2.shape[0])
annotation_terms = None
fst_topics = [{w for (w, _) in self.show_topic(topic, topn=num_words)} for topic in xrange(t1_size)]
snd_topics = [{w for (w, _) in other.show_topic(topic, topn=num_words)} for topic in xrange(t2_size)]
if (distance == 'jaccard'):
(d1, d2) = (fst_topics, snd_topics)
if diagonal:
assert (t1_size == t2_size), 'Both input models should have same no. of topics, as the diagonal will only be valid in a square matrix'
z = np.zeros(t1_size)
if annotation:
annotation_terms = np.zeros(t1_size, dtype=list)
else:
z = np.zeros((t1_size, t2_size))
if annotation:
annotation_terms = np.zeros((t1_size, t2_size), dtype=list)
for topic in np.ndindex(z.shape):
topic1 = topic[0]
if diagonal:
topic2 = topic1
else:
topic2 = topic[1]
z[topic] = distance_func(d1[topic1], d2[topic2])
if annotation:
pos_tokens = (fst_topics[topic1] & snd_topics[topic2])
neg_tokens = fst_topics[topic1].symmetric_difference(snd_topics[topic2])
pos_tokens = sample(pos_tokens, min(len(pos_tokens), n_ann_terms))
neg_tokens = sample(neg_tokens, min(len(neg_tokens), n_ann_terms))
annotation_terms[topic] = [pos_tokens, neg_tokens]
if normed:
if (np.abs(np.max(z)) > 1e-08):
z /= np.max(z)
return (z, annotation_terms)
|
'Return topic distribution for the given document `bow`, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `eps`).'
| def __getitem__(self, bow, eps=None):
| return self.get_document_topics(bow, eps, self.minimum_phi_value, self.per_word_topics)
|
'Save the model to file.
Large internal arrays may be stored into separate files, with `fname` as prefix.
`separately` can be used to define which arrays should be stored in separate files.
`ignore` parameter can be used to define which variables should be ignored, i.e. left
out from the pickled lda model. By default the internal `state` is ignored as it uses
its own serialisation not the one provided by `LdaModel`. The `state` and `dispatcher`
will be added to any ignore parameter defined.
Note: do not save as a compressed file if you intend to load the file back with `mmap`.
Note: If you intend to use models across Python 2/3 versions there are a few things to
keep in mind:
1. The pickled Python dictionaries will not work across Python versions
2. The `save` method does not automatically save all np arrays using np, only
those ones that exceed `sep_limit` set in `gensim.utils.SaveLoad.save`. The main
concern here is the `alpha` array if for instance using `alpha=\'auto\'`.
Please refer to the wiki recipes section (https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q9-how-do-i-load-a-model-in-python-3-that-was-trained-and-saved-using-python-2)
for an example on how to work around these issues.'
| def save(self, fname, ignore=['state', 'dispatcher'], separately=None, *args, **kwargs):
| if (self.state is not None):
self.state.save(utils.smart_extension(fname, '.state'), *args, **kwargs)
if ('id2word' not in ignore):
utils.pickle(self.id2word, utils.smart_extension(fname, '.id2word'))
if ((ignore is not None) and ignore):
if isinstance(ignore, six.string_types):
ignore = [ignore]
ignore = [e for e in ignore if e]
ignore = list((set(['state', 'dispatcher', 'id2word']) | set(ignore)))
else:
ignore = ['state', 'dispatcher', 'id2word']
separately_explicit = ['expElogbeta', 'sstats']
if ((isinstance(self.alpha, six.string_types) and (self.alpha == 'auto')) or (isinstance(self.alpha, np.ndarray) and (len(self.alpha.shape) != 1))):
separately_explicit.append('alpha')
if ((isinstance(self.eta, six.string_types) and (self.eta == 'auto')) or (isinstance(self.eta, np.ndarray) and (len(self.eta.shape) != 1))):
separately_explicit.append('eta')
if separately:
if isinstance(separately, six.string_types):
separately = [separately]
separately = [e for e in separately if e]
separately = list((set(separately_explicit) | set(separately)))
else:
separately = separately_explicit
super(LdaModel, self).save(fname, ignore=ignore, separately=separately, *args, **kwargs)
|
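A short save/load round trip matching the notes above (hypothetical file name; `mmap='r'` only works if the arrays were not saved compressed):
>>> lda.save('lda_model')                          # state goes to 'lda_model.state', id2word to 'lda_model.id2word'
>>> lda2 = LdaModel.load('lda_model', mmap='r')    # memory-map the large arrays back as read-only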
'Load a previously saved object from file (also see `save`).
Large arrays can be memmap\'ed back as read-only (shared memory) by setting `mmap=\'r\'`:
>>> LdaModel.load(fname, mmap=\'r\')'
| @classmethod
def load(cls, fname, *args, **kwargs):
| kwargs['mmap'] = kwargs.get('mmap', None)
result = super(LdaModel, cls).load(fname, *args, **kwargs)
if (not hasattr(result, 'random_state')):
result.random_state = utils.get_random_state(None)
logging.warning('random_state not set so using default value')
state_fname = utils.smart_extension(fname, '.state')
try:
result.state = super(LdaModel, cls).load(state_fname, *args, **kwargs)
except Exception as e:
logging.warning('failed to load state from %s: %s', state_fname, e)
id2word_fname = utils.smart_extension(fname, '.id2word')
if os.path.isfile(id2word_fname):
try:
result.id2word = utils.unpickle(id2word_fname)
except Exception as e:
logging.warning('failed to load id2word dictionary from %s: %s', id2word_fname, e)
return result
|
'Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.'
| def __init__(self, maxsize=0):
| self.maxsize = maxsize
self.workers = {}
self.callback = None
|
'`model_params` are parameters used to initialize individual workers (gets
handed all the way down to worker.initialize()).'
| @Pyro4.expose
def initialize(self, **model_params):
| self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
self.workers = {}
with utils.getNS() as ns:
self.callback = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
for (name, uri) in iteritems(ns.list(prefix='gensim.lsi_worker')):
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
logger.info(('registering worker #%i from %s' % (workerid, uri)))
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception(('unresponsive worker at %s, deleting it from the name server' % uri))
ns.remove(name)
if (not self.workers):
raise RuntimeError('no workers found; run some lsi_worker scripts on your machines first!')
|
'Return pyro URIs of all registered workers.'
| @Pyro4.expose
def getworkers(self):
| return [worker._pyroUri for worker in itervalues(self.workers)]
|
'Merge projections from across all workers and return the final projection.'
| @Pyro4.expose
def getstate(self):
| logger.info('end of input, assigning all remaining jobs')
logger.debug(('jobs done: %s, jobs received: %s' % (self._jobsdone, self._jobsreceived)))
while (self._jobsdone < self._jobsreceived):
time.sleep(0.5)
logger.info(('merging states from %i workers' % len(self.workers)))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for (workerid, worker) in workers[1:]:
logger.info(('pulling state from worker %s' % workerid))
result.merge(worker.getstate())
logger.info('sending out merged projection')
return result
|
'Initialize all workers for a new decomposition.'
| @Pyro4.expose
def reset(self):
| for (workerid, worker) in iteritems(self.workers):
logger.info(('resetting worker %s' % workerid))
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
|
'A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
In this way, control flow basically oscillates between dispatcher.jobdone()
and worker.requestjob().'
| @Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
| self._jobsdone += 1
logger.info(('worker #%s finished job #%i' % (workerid, self._jobsdone)))
worker = self.workers[workerid]
worker.requestjob()
|
'Wrap self._jobsdone, needed for remote access through proxies'
| def jobsdone(self):
| return self._jobsdone
|
'Terminate all registered workers and then the dispatcher.'
| @Pyro4.oneway
def exit(self):
| for (workerid, worker) in iteritems(self.workers):
logger.info(('terminating worker %s' % workerid))
worker.exit()
logger.info('terminating dispatcher')
os._exit(0)
|
'`normalize` dictates whether the resulting vectors will be
set to unit length.'
| def __init__(self, corpus, id2word=None, normalize=True):
| self.normalize = normalize
self.n_docs = 0
self.n_words = 0
self.entr = {}
if (corpus is not None):
self.initialize(corpus)
|
'Initialize internal statistics based on a training corpus. Called
automatically from the constructor.'
| def initialize(self, corpus):
| logger.info('calculating counts')
glob_freq = {}
(glob_num_words, doc_no) = (0, (-1))
for (doc_no, bow) in enumerate(corpus):
if ((doc_no % 10000) == 0):
logger.info(('PROGRESS: processing document #%i' % doc_no))
glob_num_words += len(bow)
for (term_id, term_count) in bow:
glob_freq[term_id] = (glob_freq.get(term_id, 0) + term_count)
self.n_docs = (doc_no + 1)
self.n_words = glob_num_words
logger.info(('calculating global log entropy weights for %i documents and %i features (%i matrix non-zeros)' % (self.n_docs, len(glob_freq), self.n_words)))
logger.debug('iterating over corpus')
for (doc_no2, bow) in enumerate(corpus):
for (key, freq) in bow:
p = ((float(freq) / glob_freq[key]) * math.log((float(freq) / glob_freq[key])))
self.entr[key] = (self.entr.get(key, 0.0) + p)
if (doc_no2 != doc_no):
raise ValueError("LogEntropyModel doesn't support generators as training data")
logger.debug('iterating over keys')
for key in self.entr:
self.entr[key] = (1 + (self.entr[key] / math.log((self.n_docs + 1))))
|
'Return log entropy representation of the input vector and/or corpus.'
| def __getitem__(self, bow):
| (is_corpus, bow) = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
vector = [(term_id, (math.log((tf + 1)) * self.entr.get(term_id))) for (term_id, tf) in bow if (term_id in self.entr)]
if self.normalize:
vector = matutils.unitvec(vector)
return vector
|
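Putting `initialize` and `__getitem__` together: a term's final weight is the local factor log(tf + 1) times the global entropy weight computed above. A hand-worked sketch with hypothetical counts:
>>> import math
>>> freqs = [2.0, 3.0]                   # the term occurs 2x and 3x in the two documents that contain it
>>> glob_freq = sum(freqs)               # 5 occurrences corpus-wide
>>> n_docs = 10
>>> entr = sum((f / glob_freq) * math.log(f / glob_freq) for f in freqs)
>>> global_weight = 1 + entr / math.log(n_docs + 1)     # about 0.72
>>> math.log(2 + 1) * global_weight                     # final log-entropy value for tf = 2, about 0.79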
'Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.'
| def __init__(self, maxsize=MAX_JOBS_QUEUE, ns_conf={}):
| self.maxsize = maxsize
self.callback = None
self.ns_conf = ns_conf
|
'`model_params` are parameters used to initialize individual workers (gets
handed all the way down to `worker.initialize()`).'
| @Pyro4.expose
def initialize(self, **model_params):
| self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
self.workers = {}
with utils.getNS(**self.ns_conf) as ns:
self.callback = Pyro4.Proxy(ns.list(prefix=LDA_DISPATCHER_PREFIX)[LDA_DISPATCHER_PREFIX])
for (name, uri) in iteritems(ns.list(prefix=LDA_WORKER_PREFIX)):
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
logger.info(('registering worker #%i at %s' % (workerid, uri)))
worker.initialize(workerid, dispatcher=self.callback, **model_params)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.warning(('unresponsive worker at %s, deleting it from the name server' % uri))
ns.remove(name)
if (not self.workers):
raise RuntimeError('no workers found; run some lda_worker scripts on your machines first!')
|
'Return pyro URIs of all registered workers.'
| @Pyro4.expose
def getworkers(self):
| return [worker._pyroUri for worker in itervalues(self.workers)]
|
'Merge states from across all workers and return the result.'
| @Pyro4.expose
def getstate(self):
| logger.info('end of input, assigning all remaining jobs')
logger.debug(('jobs done: %s, jobs received: %s' % (self._jobsdone, self._jobsreceived)))
while (self._jobsdone < self._jobsreceived):
time.sleep(0.5)
logger.info(('merging states from %i workers' % len(self.workers)))
workers = list(self.workers.values())
result = workers[0].getstate()
for worker in workers[1:]:
result.merge(worker.getstate())
logger.info('sending out merged state')
return result
|
'Initialize all workers for a new EM iterations.'
| @Pyro4.expose
def reset(self, state):
| for (workerid, worker) in iteritems(self.workers):
logger.info(('resetting worker %s' % workerid))
worker.reset(state)
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
|
'A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
In this way, control flow basically oscillates between `dispatcher.jobdone()`
and `worker.requestjob()`.'
| @Pyro4.expose
@Pyro4.oneway
@utils.synchronous('lock_update')
def jobdone(self, workerid):
| self._jobsdone += 1
logger.info(('worker #%s finished job #%i' % (workerid, self._jobsdone)))
self.workers[workerid].requestjob()
|
'Wrap self._jobsdone, needed for remote access through Pyro proxies'
| def jobsdone(self):
| return self._jobsdone
|
'Terminate all registered workers and then the dispatcher.'
| @Pyro4.oneway
def exit(self):
| for (workerid, worker) in iteritems(self.workers):
logger.info(('terminating worker %s' % workerid))
worker.exit()
logger.info('terminating dispatcher')
os._exit(0)
|
'Request jobs from the dispatcher, in a perpetual loop until `getstate()` is called.'
| @Pyro4.expose
@Pyro4.oneway
def requestjob(self):
| if (self.model is None):
raise RuntimeError('worker must be initialized before receiving jobs')
job = None
while ((job is None) and (not self.finished)):
try:
job = self.dispatcher.getjob(self.myid)
except Queue.Empty:
continue
if (job is not None):
logger.info(('worker #%s received job #%i' % (self.myid, self.jobsdone)))
self.processjob(job)
self.dispatcher.jobdone(self.myid)
else:
logger.info(('worker #%i stopping asking for jobs' % self.myid))
|
'If the iterable corpus and one of author2doc/doc2author dictionaries are given,
start training straight away. If not given, the model is left untrained
(presumably because you want to call the `update` method manually).
`num_topics` is the number of requested latent topics to be extracted from
the training corpus.
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing.
`author2doc` is a dictionary where the keys are the names of authors, and the
values are lists of documents that the author contributes to.
`doc2author` is a dictionary where the keys are document IDs (indexes to corpus)
and the values are lists of author names. I.e. this is the reverse mapping of
`author2doc`. Only one of the two, `author2doc` and `doc2author`, has to be
supplied.
`passes` is the number of times the model makes a pass over the entire training
data.
`iterations` is the maximum number of times the model loops over each document
(M-step). The iterations stop when convergence is reached.
`chunksize` controls the size of the mini-batches.
`alpha` and `eta` are hyperparameters that affect sparsity of the author-topic
(theta) and topic-word (lambda) distributions. Both default to a symmetric
1.0/num_topics prior.
`alpha` can be set to an explicit array = prior of your choice. It also
supports special values of \'asymmetric\' and \'auto\': the former uses a fixed
normalized asymmetric 1.0/topicno prior, the latter learns an asymmetric
prior directly from your data.
`eta` can be a scalar for a symmetric prior over topic/word
distributions, or a vector of shape num_words, which can be used to
impose (user defined) asymmetric priors over the word distribution.
It also supports the special value \'auto\', which learns an asymmetric
prior over words directly from your data. `eta` can also be a matrix
of shape num_topics x num_words, which can be used to impose
asymmetric priors over the word distribution on a per-topic basis
(can not be learned from data).
Calculate and log perplexity estimate from the latest mini-batch every
`eval_every` model updates. Set to None to disable perplexity estimation.
`decay` and `offset` parameters are the same as Kappa and Tau_0 in
Hoffman et al, respectively. `decay` controls how quickly old documents are
forgotten, while `offset` down-weights early iterations.
`minimum_probability` controls filtering the topics returned for a document (bow).
`random_state` can be an integer or a numpy.random.RandomState object. Set the
state of the random number generator inside the author-topic model, to ensure
reproducibility of your experiments, for example.
`serialized` indicates whether the input corpora to the model are simple
in-memory lists (`serialized = False`) or saved to the hard-drive
(`serialized = True`). Note that this behaviour is quite different from
other Gensim models. If your data is too large to fit into memory, use
this functionality. Note that calling `AuthorTopicModel.update` with new
data may be cumbersome as it requires all the existing data to be
re-serialized.
`serialization_path` must be set to a filepath, if `serialized = True` is
used. Use, for example, `serialization_path = /tmp/serialized_model.mm` or use your
working directory by setting `serialization_path = serialized_model.mm`. An existing
file *cannot* be overwritten; either delete the old file or choose a different
name.
Example:
>>> model = AuthorTopicModel(corpus, num_topics=100, author2doc=author2doc, id2word=id2word) # train model
>>> model.update(corpus2) # update the author-topic model with additional documents
>>> model = AuthorTopicModel(corpus, num_topics=50, author2doc=author2doc, id2word=id2word, alpha=\'auto\', eval_every=5) # train asymmetric alpha from data'
| def __init__(self, corpus=None, num_topics=100, id2word=None, author2doc=None, doc2author=None, chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0, alpha='symmetric', eta='symmetric', update_every=1, eval_every=10, gamma_threshold=0.001, serialized=False, serialization_path=None, minimum_probability=0.01, random_state=None):
| distributed = False
self.dispatcher = None
self.numworkers = 1
self.id2word = id2word
if ((corpus is None) and (self.id2word is None)):
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if (self.id2word is None):
logger.warning('no word id mapping provided; initializing from corpus, assuming identity')
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
elif (len(self.id2word) > 0):
self.num_terms = (1 + max(self.id2word.keys()))
else:
self.num_terms = 0
if (self.num_terms == 0):
raise ValueError('cannot compute the author-topic model over an empty collection (no terms)')
logger.info('Vocabulary consists of %d words.', self.num_terms)
self.author2doc = {}
self.doc2author = {}
self.distributed = distributed
self.num_topics = num_topics
self.num_authors = 0
self.chunksize = chunksize
self.decay = decay
self.offset = offset
self.minimum_probability = minimum_probability
self.num_updates = 0
self.total_docs = 0
self.passes = passes
self.update_every = update_every
self.eval_every = eval_every
self.author2id = {}
self.id2author = {}
self.serialized = serialized
if (serialized and (not serialization_path)):
raise ValueError('If serialized corpora are used, the path to a file where the corpus should be saved must be provided (serialization_path).')
if (serialized and serialization_path):
assert (not isfile(serialization_path)), 'A file already exists at the serialization_path path; choose a different serialization_path, or delete the file.'
self.serialization_path = serialization_path
self.init_empty_corpus()
(self.alpha, self.optimize_alpha) = self.init_dir_prior(alpha, 'alpha')
assert (self.alpha.shape == (self.num_topics,)), ('Invalid alpha shape. Got shape %s, but expected (%d, )' % (str(self.alpha.shape), self.num_topics))
if isinstance(eta, six.string_types):
if (eta == 'asymmetric'):
raise ValueError("The 'asymmetric' option cannot be used for eta")
(self.eta, self.optimize_eta) = self.init_dir_prior(eta, 'eta')
self.random_state = utils.get_random_state(random_state)
assert ((self.eta.shape == (self.num_terms,)) or (self.eta.shape == (self.num_topics, self.num_terms))), ('Invalid eta shape. Got shape %s, but expected (%d, 1) or (%d, %d)' % (str(self.eta.shape), self.num_terms, self.num_topics, self.num_terms))
self.iterations = iterations
self.gamma_threshold = gamma_threshold
self.state = AuthorTopicState(self.eta, (self.num_topics, self.num_terms), (self.num_authors, self.num_topics))
self.state.sstats = self.random_state.gamma(100.0, (1.0 / 100.0), (self.num_topics, self.num_terms))
self.expElogbeta = np.exp(dirichlet_expectation(self.state.sstats))
if ((corpus is not None) and ((author2doc is not None) or (doc2author is not None))):
use_numpy = (self.dispatcher is not None)
self.update(corpus, author2doc, doc2author, chunks_as_numpy=use_numpy)
|
'Initialize an empty corpus. If the corpora are to be treated as lists, simply
initialize an empty list. If serialization is used, initialize an empty corpus
of the class `gensim.corpora.MmCorpus`.'
| def init_empty_corpus(self):
| if self.serialized:
MmCorpus.serialize(self.serialization_path, [])
self.corpus = MmCorpus(self.serialization_path)
else:
self.corpus = []
|
'Add new documents in `corpus` to `self.corpus`. If serialization is used,
then the entire corpus (`self.corpus`) is re-serialized and the new documents
are added in the process. If serialization is not used, the corpus, as a list
of documents, is simply extended.'
| def extend_corpus(self, corpus):
| if self.serialized:
if isinstance(corpus, MmCorpus):
assert (self.corpus.input != corpus.input), 'Input corpus cannot have the same file path as the model corpus (serialization_path).'
corpus_chain = chain(self.corpus, corpus)
copyfile(self.serialization_path, (self.serialization_path + '.tmp'))
self.corpus.input = (self.serialization_path + '.tmp')
MmCorpus.serialize(self.serialization_path, corpus_chain)
self.corpus = MmCorpus(self.serialization_path)
remove((self.serialization_path + '.tmp'))
else:
assert isinstance(corpus, list), 'If serialized == False, all input corpora must be lists.'
self.corpus.extend(corpus)
|
'Efficiently computes the normalizing factor in phi.'
| def compute_phinorm(self, ids, authors_d, expElogthetad, expElogbetad):
| expElogtheta_sum = expElogthetad.sum(axis=0)
phinorm = (expElogtheta_sum.dot(expElogbetad) + 1e-100)
return phinorm
|
'Given a chunk of sparse document vectors, update gamma (parameters
controlling the topic weights) for each author corresponding to the
documents in the chunk.
The whole input chunk of documents is assumed to fit in RAM; chunking of
a large corpus must be done earlier in the pipeline.
If `collect_sstats` is True, also collect sufficient statistics needed
to update the model\'s topic-word distributions, and return a 2-tuple
`(gamma_chunk, sstats)`. Otherwise, return `(gamma_chunk, None)`.
`gamma_chunk` is of shape `len(chunk_authors) x self.num_topics`, where
`chunk_authors` is the number of authors in the documents in the
current chunk.
Avoids computing the `phi` variational parameter directly using the
optimization presented in **Lee, Seung: Algorithms for non-negative matrix factorization, NIPS 2001**.'
| def inference(self, chunk, author2doc, doc2author, rhot, collect_sstats=False, chunk_doc_idx=None):
| try:
_ = len(chunk)
except:
chunk = list(chunk)
if (len(chunk) > 1):
logger.debug('performing inference on a chunk of %i documents', len(chunk))
if collect_sstats:
sstats = np.zeros_like(self.expElogbeta)
else:
sstats = None
converged = 0
gamma_chunk = np.zeros((0, self.num_topics))
for (d, doc) in enumerate(chunk):
if (chunk_doc_idx is not None):
doc_no = chunk_doc_idx[d]
else:
doc_no = d
if (doc and (not isinstance(doc[0][0], (six.integer_types + (np.integer,))))):
ids = [int(id) for (id, _) in doc]
else:
ids = [id for (id, _) in doc]
cts = np.array([cnt for (_, cnt) in doc])
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
gammad = self.state.gamma[authors_d, :]
tilde_gamma = gammad.copy()
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
expElogbetad = self.expElogbeta[:, ids]
phinorm = self.compute_phinorm(ids, authors_d, expElogthetad, expElogbetad)
for iteration in xrange(self.iterations):
lastgamma = tilde_gamma.copy()
for (ai, a) in enumerate(authors_d):
tilde_gamma[ai, :] = (self.alpha + ((len(self.author2doc[self.id2author[a]]) * expElogthetad[ai, :]) * np.dot((cts / phinorm), expElogbetad.T)))
tilde_gamma = (((1 - rhot) * gammad) + (rhot * tilde_gamma))
Elogthetad = dirichlet_expectation(tilde_gamma)
expElogthetad = np.exp(Elogthetad)
phinorm = self.compute_phinorm(ids, authors_d, expElogthetad, expElogbetad)
meanchange_gamma = np.mean(abs((tilde_gamma - lastgamma)))
gamma_condition = (meanchange_gamma < self.gamma_threshold)
if gamma_condition:
converged += 1
break
self.state.gamma[authors_d, :] = tilde_gamma
gamma_chunk = np.vstack([gamma_chunk, tilde_gamma])
if collect_sstats:
expElogtheta_sum_a = expElogthetad.sum(axis=0)
sstats[:, ids] += np.outer(expElogtheta_sum_a.T, (cts / phinorm))
if (len(chunk) > 1):
logger.debug('%i/%i documents converged within %i iterations', converged, len(chunk), self.iterations)
if collect_sstats:
sstats *= self.expElogbeta
return (gamma_chunk, sstats)
|
'Perform inference on a chunk of documents, and accumulate the collected
sufficient statistics in `state` (or `self.state` if None).'
| def do_estep(self, chunk, author2doc, doc2author, rhot, state=None, chunk_doc_idx=None):
| if (state is None):
state = self.state
(gamma, sstats) = self.inference(chunk, author2doc, doc2author, rhot, collect_sstats=True, chunk_doc_idx=chunk_doc_idx)
state.sstats += sstats
state.numdocs += len(chunk)
return gamma
|
'Calculate and return per-word likelihood bound, using the `chunk` of
documents as evaluation corpus. Also output the calculated statistics, incl.
perplexity=2^(-bound), to the log at INFO level.
| def log_perplexity(self, chunk, chunk_doc_idx=None, total_docs=None):
| if (total_docs is None):
total_docs = len(chunk)
corpus_words = sum((cnt for document in chunk for (_, cnt) in document))
subsample_ratio = ((1.0 * total_docs) / len(chunk))
perwordbound = (self.bound(chunk, chunk_doc_idx, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words))
logger.info(('%.3f per-word bound, %.1f perplexity estimate based on a corpus of %i documents with %i words' % (perwordbound, np.exp2((- perwordbound)), len(chunk), corpus_words)))
return perwordbound
|
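The perplexity figure logged above is just `2 ** (-per-word bound)`; a toy trace with a made-up bound value:

import numpy as np

perwordbound = -7.5                      # hypothetical per-word bound (base-2 log scale)
print('%.3f per-word bound, %.1f perplexity estimate' % (perwordbound, np.exp2(-perwordbound)))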
'Train the model with new documents, by EM-iterating over `corpus` until
the topics converge (or until the maximum number of allowed iterations
is reached). `corpus` must be an iterable (repeatable stream of documents).
This update also supports updating an already trained model (`self`)
with new documents from `corpus`; the two models are then merged in
proportion to the number of old vs. new documents. This feature is still
experimental for non-stationary input streams.
For stationary input (no topic drift in new documents), on the other hand,
this equals the online update of Hoffman et al. and is guaranteed to
converge for any `decay` in (0.5, 1.0]. Additionally, for smaller
`corpus` sizes, an increasing `offset` may be beneficial (see
Table 1 in Hoffman et al.)
If update is called with authors that already exist in the model, it will
resume training on not only new documents for that author, but also the
previously seen documents. This is necessary for those authors\' topic
distributions to converge.
Every time `update(corpus, author2doc)` is called, the new documents are
appended to all the previously seen documents, and author2doc is
combined with the previously seen authors.
To resume training on all the data seen by the model, simply call
`update()`.
It is not possible to add new authors to existing documents, as all
documents in `corpus` are assumed to be new documents.
Args:
corpus (gensim corpus): The corpus with which the author-topic model should be updated.
author2doc (dictionary): author to document mapping corresponding to indexes in input
corpus.
doc2author (dictionary): document to author mapping corresponding to indexes in input
corpus.
chunks_as_numpy (bool): Whether each chunk passed to `.inference` should be a np
array or not. np can in some settings turn the term IDs
into floats; these will be converted back into integers in
inference, which incurs a performance hit. For distributed
computing it may be desirable to keep the chunks as np
arrays.
For other parameter settings, see :class:`AuthorTopicModel` constructor.'
| def update(self, corpus=None, author2doc=None, doc2author=None, chunksize=None, decay=None, offset=None, passes=None, update_every=None, eval_every=None, iterations=None, gamma_threshold=None, chunks_as_numpy=False):
| if (decay is None):
decay = self.decay
if (offset is None):
offset = self.offset
if (passes is None):
passes = self.passes
if (update_every is None):
update_every = self.update_every
if (eval_every is None):
eval_every = self.eval_every
if (iterations is None):
iterations = self.iterations
if (gamma_threshold is None):
gamma_threshold = self.gamma_threshold
author2doc = deepcopy(author2doc)
doc2author = deepcopy(doc2author)
if (corpus is None):
assert (self.total_docs > 0), 'update() was called with no documents to train on.'
train_corpus_idx = [d for d in xrange(self.total_docs)]
num_input_authors = len(self.author2doc)
else:
if ((doc2author is None) and (author2doc is None)):
raise ValueError('at least one of author2doc/doc2author must be specified, to establish input space dimensionality')
if (doc2author is None):
doc2author = construct_doc2author(corpus, author2doc)
elif (author2doc is None):
author2doc = construct_author2doc(corpus, doc2author)
num_input_authors = len(author2doc)
try:
len_input_corpus = len(corpus)
except:
logger.warning('input corpus stream has no len(); counting documents')
len_input_corpus = sum((1 for _ in corpus))
if (len_input_corpus == 0):
logger.warning('AuthorTopicModel.update() called with an empty corpus')
return
self.total_docs += len_input_corpus
self.extend_corpus(corpus)
new_authors = []
for a in sorted(author2doc.keys()):
if (not self.author2doc.get(a)):
new_authors.append(a)
num_new_authors = len(new_authors)
for (a_id, a_name) in enumerate(new_authors):
self.author2id[a_name] = (a_id + self.num_authors)
self.id2author[(a_id + self.num_authors)] = a_name
self.num_authors += num_new_authors
gamma_new = self.random_state.gamma(100.0, (1.0 / 100.0), (num_new_authors, self.num_topics))
self.state.gamma = np.vstack([self.state.gamma, gamma_new])
for (a, doc_ids) in author2doc.items():
doc_ids = [((d + self.total_docs) - len_input_corpus) for d in doc_ids]
for (a, doc_ids) in author2doc.items():
if self.author2doc.get(a):
self.author2doc[a].extend(doc_ids)
else:
self.author2doc[a] = doc_ids
for (d, a_list) in doc2author.items():
self.doc2author[d] = a_list
train_corpus_idx = []
for a in author2doc.keys():
for doc_ids in self.author2doc.values():
train_corpus_idx.extend(doc_ids)
train_corpus_idx = list(set(train_corpus_idx))
lencorpus = len(train_corpus_idx)
if (chunksize is None):
chunksize = min(lencorpus, self.chunksize)
self.state.numdocs += lencorpus
if update_every:
updatetype = 'online'
updateafter = min(lencorpus, ((update_every * self.numworkers) * chunksize))
else:
updatetype = 'batch'
updateafter = lencorpus
evalafter = min(lencorpus, (((eval_every or 0) * self.numworkers) * chunksize))
updates_per_pass = max(1, (lencorpus / updateafter))
logger.info('running %s author-topic training, %s topics, %s authors, %i passes over the supplied corpus of %i documents, updating model once every %i documents, evaluating perplexity every %i documents, iterating %ix with a convergence threshold of %f', updatetype, self.num_topics, num_input_authors, passes, lencorpus, updateafter, evalafter, iterations, gamma_threshold)
if ((updates_per_pass * passes) < 10):
logger.warning('too few updates, training might not converge; consider increasing the number of passes or iterations to improve accuracy')
def rho():
return pow(((offset + pass_) + (self.num_updates / chunksize)), (- decay))
for pass_ in xrange(passes):
if self.dispatcher:
logger.info(('initializing %s workers' % self.numworkers))
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
reallen = 0
for (chunk_no, chunk_doc_idx) in enumerate(utils.grouper(train_corpus_idx, chunksize, as_numpy=chunks_as_numpy)):
chunk = [self.corpus[d] for d in chunk_doc_idx]
reallen += len(chunk)
if (eval_every and ((reallen == lencorpus) or (((chunk_no + 1) % (eval_every * self.numworkers)) == 0))):
self.log_perplexity(chunk, chunk_doc_idx, total_docs=lencorpus)
if self.dispatcher:
logger.info('PROGRESS: pass %i, dispatching documents up to #%i/%i', pass_, ((chunk_no * chunksize) + len(chunk)), lencorpus)
self.dispatcher.putjob(chunk)
else:
logger.info('PROGRESS: pass %i, at document #%i/%i', pass_, ((chunk_no * chunksize) + len(chunk)), lencorpus)
gammat = self.do_estep(chunk, self.author2doc, self.doc2author, rho(), other, chunk_doc_idx)
if self.optimize_alpha:
self.update_alpha(gammat, rho())
dirty = True
del chunk
if (update_every and (((chunk_no + 1) % (update_every * self.numworkers)) == 0)):
if self.dispatcher:
logger.info('reached the end of input; now waiting for all remaining jobs to finish')
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, (pass_ > 0))
del other
if self.dispatcher:
logger.info('initializing workers')
self.dispatcher.reset(self.state)
else:
other = AuthorTopicState(self.eta, self.state.sstats.shape, (0, 0))
dirty = False
if (reallen != lencorpus):
raise RuntimeError("input corpus size changed during training (don't use generators as input)")
if dirty:
if self.dispatcher:
logger.info('reached the end of input; now waiting for all remaining jobs to finish')
other = self.dispatcher.getstate()
self.do_mstep(rho(), other, (pass_ > 0))
del other
dirty = False
|
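A hedged end-to-end usage sketch of the training and update flow described above; the corpus, dictionary and author names are invented, and the `gensim.models.AuthorTopicModel` import path is assumed to match the gensim version this code comes from:

from gensim.corpora import Dictionary
from gensim.models import AuthorTopicModel

# Toy corpus: each document is a list of tokens (made-up data).
texts = [['bank', 'river', 'water'], ['bank', 'money', 'loan'], ['water', 'fish']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]

# Author-to-document mapping; the integers index into `corpus`.
author2doc = {'alice': [0, 2], 'bob': [1]}

model = AuthorTopicModel(corpus=corpus, author2doc=author2doc, id2word=dictionary, num_topics=2, passes=10)

# Later: update with new documents; doc ids in the new author2doc index the *new* corpus.
new_corpus = [dictionary.doc2bow(['money', 'fish', 'market'])]
model.update(new_corpus, author2doc={'alice': [0]})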
'Estimate the variational bound of documents from `corpus`:
E_q[log p(corpus)] - E_q[log q(corpus)]
There are basically two use cases of this method:
1. `chunk` is a subset of the training corpus, and `chunk_doc_idx` is provided,
indicating the indexes of the documents in the training corpus.
2. `chunk` is a test set (held-out data), and author2doc and doc2author
corresponding to this test set are provided. There must not be any new authors
passed to this method. `chunk_doc_idx` is not needed in this case.
To obtain the per-word bound, compute:
>>> corpus_words = sum(cnt for document in corpus for _, cnt in document)
>>> model.bound(corpus, author2doc=author2doc, doc2author=doc2author) / corpus_words'
| def bound(self, chunk, chunk_doc_idx=None, subsample_ratio=1.0, author2doc=None, doc2author=None):
| _lambda = self.state.get_lambda()
Elogbeta = dirichlet_expectation(_lambda)
expElogbeta = np.exp(Elogbeta)
gamma = self.state.gamma
if ((author2doc is None) and (doc2author is None)):
author2doc = self.author2doc
doc2author = self.doc2author
if (not chunk_doc_idx):
raise ValueError('Either author dictionaries or chunk_doc_idx must be provided. Consult documentation of bound method.')
elif ((author2doc is not None) and (doc2author is not None)):
for a in author2doc.keys():
if (not self.author2doc.get(a)):
raise ValueError('bound cannot be called with authors not seen during training.')
if chunk_doc_idx:
raise ValueError('Either author dictionaries or chunk_doc_idx must be provided, not both. Consult documentation of bound method.')
else:
raise ValueError('Either both author2doc and doc2author should be provided, or neither. Consult documentation of bound method.')
Elogtheta = dirichlet_expectation(gamma)
expElogtheta = np.exp(Elogtheta)
word_score = 0.0
theta_score = 0.0
for (d, doc) in enumerate(chunk):
if chunk_doc_idx:
doc_no = chunk_doc_idx[d]
else:
doc_no = d
authors_d = [self.author2id[a] for a in self.doc2author[doc_no]]
ids = np.array([id for (id, _) in doc])
cts = np.array([cnt for (_, cnt) in doc])
if ((d % self.chunksize) == 0):
logger.debug('bound: at document #%i in chunk', d)
phinorm = self.compute_phinorm(ids, authors_d, expElogtheta[authors_d, :], expElogbeta[:, ids])
word_score += ((np.log((1.0 / len(authors_d))) * sum(cts)) + cts.dot(np.log(phinorm)))
word_score *= subsample_ratio
for a in author2doc.keys():
a = self.author2id[a]
theta_score += np.sum(((self.alpha - gamma[a, :]) * Elogtheta[a, :]))
theta_score += np.sum((gammaln(gamma[a, :]) - gammaln(self.alpha)))
theta_score += (gammaln(np.sum(self.alpha)) - gammaln(np.sum(gamma[a, :])))
theta_score *= (self.num_authors / len(author2doc))
beta_score = 0.0
beta_score += np.sum(((self.eta - _lambda) * Elogbeta))
beta_score += np.sum((gammaln(_lambda) - gammaln(self.eta)))
sum_eta = np.sum(self.eta)
beta_score += np.sum((gammaln(sum_eta) - gammaln(np.sum(_lambda, 1))))
total_score = ((word_score + theta_score) + beta_score)
return total_score
|
'This method overwrites `LdaModel.get_document_topics` and simply raises an
exception. `get_document_topics` is not valid for the author-topic model,
use `get_author_topics` instead.'
| def get_document_topics(self, word_id, minimum_probability=None):
| raise NotImplementedError('Method "get_document_topics" is not valid for the author-topic model. Use the "get_author_topics" method.')
|
'Return the topic distribution for the given author, as a list of
(topic_id, topic_probability) 2-tuples.
Ignore topics with very low probability (below `minimum_probability`).
Obtaining topic probabilities of each word, as in LDA (via `per_word_topics`),
is not supported.'
| def get_author_topics(self, author_name, minimum_probability=None):
| author_id = self.author2id[author_name]
if (minimum_probability is None):
minimum_probability = self.minimum_probability
minimum_probability = max(minimum_probability, 1e-08)
topic_dist = (self.state.gamma[author_id, :] / sum(self.state.gamma[author_id, :]))
author_topics = [(topicid, topicvalue) for (topicid, topicvalue) in enumerate(topic_dist) if (topicvalue >= minimum_probability)]
return author_topics
|
'Return topic distribution for input author as a list of
(topic_id, topic_probability) 2-tuples.
Ignores topics with probability less than `eps`.
Do not call this method directly, instead use `model[author_names]`.'
| def __getitem__(self, author_names, eps=None):
| if isinstance(author_names, list):
items = []
for a in author_names:
items.append(self.get_author_topics(a, minimum_probability=eps))
else:
items = self.get_author_topics(author_names, minimum_probability=eps)
return items
|
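A short usage sketch of `get_author_topics` and the `model[...]` access above, on a tiny invented corpus (the printed values are whatever the random initialization produces):

from gensim.corpora import Dictionary
from gensim.models import AuthorTopicModel

texts = [['apple', 'fruit'], ['apple', 'pie', 'sugar']]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
model = AuthorTopicModel(corpus=corpus, author2doc={'ann': [0], 'ben': [1]}, id2word=dictionary, num_topics=2)

print(model.get_author_topics('ann'))   # [(topic_id, probability), ...] above minimum_probability
print(model['ann'])                     # same result through __getitem__
print(model[['ann', 'ben']])            # a list of names yields a list of distributions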
'Note a document tag during initial corpus scan, for structure sizing.'
| def note_doctag(self, key, document_no, document_length):
| if isinstance(key, (integer_types + (integer,))):
self.max_rawint = max(self.max_rawint, key)
elif (key in self.doctags):
self.doctags[key] = self.doctags[key].repeat(document_length)
else:
self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
self.offset2doctag.append(key)
self.count = ((self.max_rawint + 1) + len(self.offset2doctag))
|
'Return indexes and backing-arrays used in training examples.'
| def indexed_doctags(self, doctag_tokens):
| return ([self._int_index(index) for index in doctag_tokens if (index in self)], self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)
|
'Persist any changes made to the given indexes (matching tuple previously
returned by indexed_doctags()); a no-op for this implementation'
| def trained_item(self, indexed_tuple):
| pass
|
'Return int index for either string or int index'
| def _int_index(self, index):
| if isinstance(index, (integer_types + (integer,))):
return index
else:
return ((self.max_rawint + 1) + self.doctags[index].offset)
|
'Return string index for given int index, if available'
| def _key_index(self, i_index, missing=None):
| warnings.warn('use DocvecsArray.index_to_doctag', DeprecationWarning)
return self.index_to_doctag(i_index)
|
'Return string key for given i_index, if available. Otherwise return raw int doctag (same int).'
| def index_to_doctag(self, i_index):
| candidate_offset = ((i_index - self.max_rawint) - 1)
if (0 <= candidate_offset < len(self.offset2doctag)):
return self.offset2doctag[candidate_offset]
else:
return i_index
|
'Accept a single key (int or string tag) or list of keys as input.
If a single string or int, return designated tag\'s vector
representation, as a 1D numpy array.
If a list, return designated tags\' vector representations as a
2D numpy array: #tags x #vector_size.'
| def __getitem__(self, index):
| if isinstance(index, ((string_types + integer_types) + (integer,))):
return self.doctag_syn0[self._int_index(index)]
return vstack([self[i] for i in index])
|
'Estimated memory for tag lookup; 0 if using pure int tags.'
| def estimated_lookup_memory(self):
| return ((60 * len(self.offset2doctag)) + (140 * len(self.doctags)))
|
'Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training or inference** after doing a replace.
The model becomes effectively read-only = you can call `most_similar`, `similarity`
etc., but not `train` or `infer_vector`.'
| def init_sims(self, replace=False):
| if ((getattr(self, 'doctag_syn0norm', None) is None) or replace):
logger.info('precomputing L2-norms of doc weight vectors')
if replace:
for i in xrange(self.doctag_syn0.shape[0]):
self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum((-1)))
self.doctag_syn0norm = self.doctag_syn0
else:
if self.mapfile_path:
self.doctag_syn0norm = np_memmap((self.mapfile_path + '.doctag_syn0norm'), dtype=REAL, mode='w+', shape=self.doctag_syn0.shape)
else:
self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum((-1)))[..., newaxis], self.doctag_syn0norm)
|
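A minimal NumPy sketch of what the normalization above does, on a made-up array; with `replace=True` the same buffer is overwritten row by row and then aliased:

import numpy as np

doctag_syn0 = np.random.rand(3, 4).astype(np.float32)      # pretend doc vectors

# replace=False: keep the originals and build a separate normalized copy.
doctag_syn0norm = doctag_syn0 / np.sqrt((doctag_syn0 ** 2).sum(-1))[..., np.newaxis]

# replace=True: normalize in place and point the norm attribute at the same array.
for i in range(doctag_syn0.shape[0]):
    doctag_syn0[i, :] /= np.sqrt((doctag_syn0[i, :] ** 2).sum(-1))
doctag_syn0norm = doctag_syn0
print(np.allclose((doctag_syn0norm ** 2).sum(-1), 1.0))     # True: unit-length rows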
'Find the top-N most similar docvecs known from training. Positive docs contribute
positively towards the similarity, negative docs negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given docs. Docs may be specified as vectors, integer indexes
of trained docvecs, or if the documents were originally presented with string tags,
by the corresponding tags.
The \'clip_start\' and \'clip_end\' allow limiting results to a particular contiguous
range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
there was chosen to be significant, such as more popular tag IDs in lower indexes.)'
| def most_similar(self, positive=[], negative=[], topn=10, clip_start=0, clip_end=None, indexer=None):
| self.init_sims()
clip_end = (clip_end or len(self.doctag_syn0norm))
if (isinstance(positive, ((string_types + integer_types) + (integer,))) and (not negative)):
positive = [positive]
positive = [((doc, 1.0) if isinstance(doc, ((string_types + integer_types) + (ndarray, integer))) else doc) for doc in positive]
negative = [((doc, (-1.0)) if isinstance(doc, ((string_types + integer_types) + (ndarray, integer))) else doc) for doc in negative]
(all_docs, mean) = (set(), [])
for (doc, weight) in (positive + negative):
if isinstance(doc, ndarray):
mean.append((weight * doc))
elif ((doc in self.doctags) or (doc < self.count)):
mean.append((weight * self.doctag_syn0norm[self._int_index(doc)]))
all_docs.add(self._int_index(doc))
else:
raise KeyError(("doc '%s' not in trained set" % doc))
if (not mean):
raise ValueError('cannot compute similarity with no input')
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
if (indexer is not None):
return indexer.most_similar(mean, topn)
dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
if (not topn):
return dists
best = matutils.argsort(dists, topn=(topn + len(all_docs)), reverse=True)
result = [(self.index_to_doctag((sim + clip_start)), float(dists[sim])) for sim in best if ((sim + clip_start) not in all_docs)]
return result[:topn]
|
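A hedged usage sketch of docvec similarity queries; the tiny training set is invented and the `size`/`iter` keyword names assume the gensim version shown here:

from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(['human', 'machine', 'interface'], ['doc0']),
        TaggedDocument(['graph', 'trees', 'minors'], ['doc1']),
        TaggedDocument(['survey', 'user', 'interface'], ['doc2'])]
model = Doc2Vec(docs, size=20, min_count=1, iter=20)

# Nearest doctags to 'doc0'; clip_start/clip_end could restrict the searched index range.
print(model.docvecs.most_similar('doc0', topn=2))
print(model.docvecs.most_similar(positive=['doc0'], negative=['doc1'], topn=1))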
'Which doc from the given list doesn\'t go with the others?
(TODO: Accept vectors of out-of-training-set docs, as if from inference.)'
| def doesnt_match(self, docs):
| self.init_sims()
docs = [doc for doc in docs if ((doc in self.doctags) or (0 <= doc < self.count))]
logger.debug(('using docs %s' % docs))
if (not docs):
raise ValueError('cannot select a doc from an empty list')
vectors = vstack((self.doctag_syn0norm[self._int_index(doc)] for doc in docs)).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, docs))[0][1]
|
'Compute cosine similarity between two docvecs in the trained set, specified by int index or
string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)'
| def similarity(self, d1, d2):
| return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
|
'Compute cosine similarity between two sets of docvecs from the trained set, specified by int
index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)'
| def n_similarity(self, ds1, ds2):
| v1 = [self[doc] for doc in ds1]
v2 = [self[doc] for doc in ds2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
|
'Compute cosine similarity between two post-bulk out of training documents.
Document should be a list of (word) tokens.'
| def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
| d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
return dot(matutils.unitvec(d1), matutils.unitvec(d2))
|
'Initialize the model from an iterable of `documents`. Each document is a
TaggedDocument object that will be used for training.
The `documents` iterable can be simply a list of TaggedDocument elements, but for larger corpora,
consider an iterable that streams the documents directly from disk/network.
If you don\'t supply `documents`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`dm` defines the training algorithm. By default (`dm=1`), \'distributed memory\' (PV-DM) is used.
Otherwise, `distributed bag of words` (PV-DBOW) is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the predicted word and context words used for prediction
within a document.
`alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
`seed` = for the random number generator.
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 1e-3, values of 1e-5 (or lower) may also be useful; set to 0 to disable downsampling.
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
but values of 10 or 20 are common in published \'Paragraph Vector\' experiments.
`hs` = if 1, hierarchical softmax will be used for model training.
If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
Default is 5. If set to 0, no negative sampling is used.
`dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when dm is used in non-concatenative mode.
`dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
default is 0 (off). Note concatenation results in a much-larger model, as the input
is no longer the size of one (sampled or arithmetically combined) word vector, but the
size of the tag(s) and all words in the context strung together.
`dm_tag_count` = expected constant number of document tags per document, when using
dm_concat mode; default is 1.
`dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
doc-vector training; default is 0 (faster training of doc-vectors only).
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
Note: The rule, if given, is only used to prune the vocabulary during build_vocab() and is not stored as part
of the model.'
| def __init__(self, documents=None, dm_mean=None, dm=1, dbow_words=0, dm_concat=0, dm_tag_count=1, docvecs=None, docvecs_mapfile=None, comment=None, trim_rule=None, **kwargs):
| if ('sentences' in kwargs):
raise DeprecationWarning("'sentences' in doc2vec was renamed to 'documents'. Please use documents parameter.")
super(Doc2Vec, self).__init__(sg=((1 + dm) % 2), null_word=dm_concat, **kwargs)
self.load = call_on_class_only
if (dm_mean is not None):
self.cbow_mean = dm_mean
self.dbow_words = dbow_words
self.dm_concat = dm_concat
self.dm_tag_count = dm_tag_count
if (self.dm and self.dm_concat):
self.layer1_size = ((self.dm_tag_count + (2 * self.window)) * self.vector_size)
self.docvecs = (docvecs or DocvecsArray(docvecs_mapfile))
self.comment = comment
if (documents is not None):
self.build_vocab(documents, trim_rule=trim_rule)
self.train(documents, total_examples=self.corpus_count, epochs=self.iter)
|
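A hedged construction sketch contrasting the two training modes described above (PV-DM vs. PV-DBOW); the data is invented and the keyword names follow the docstring of this version:

from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(words=['the', 'quick', 'brown', 'fox'], tags=[0]),
        TaggedDocument(words=['lazy', 'dogs', 'sleep'], tags=[1])]

# PV-DM (default, dm=1), averaging context vectors instead of summing them.
dm_model = Doc2Vec(docs, dm=1, dm_mean=1, size=50, window=5, min_count=1, iter=20)

# PV-DBOW with simultaneous skip-gram word training.
dbow_model = Doc2Vec(docs, dm=0, dbow_words=1, size=50, negative=5, min_count=1, iter=20)

print(dm_model)     # __str__ summarizes the configuration, e.g. Doc2Vec(dm/m,d50,...)
print(dbow_model)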
'Reuse shareable structures from other_model.'
| def reset_from(self, other_model):
| self.docvecs.borrow_from(other_model.docvecs)
super(Doc2Vec, self).reset_from(other_model)
|
'Return the number of words in a given job.'
| def _raw_word_count(self, job):
| return sum((len(sentence.words) for sentence in job))
|
'Infer a vector for given post-bulk training document.
Document should be a list of (word) tokens.'
| def infer_vector(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
| doctag_vectors = empty((1, self.vector_size), dtype=REAL)
doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
doctag_locks = ones(1, dtype=REAL)
doctag_indexes = [0]
work = zeros(self.layer1_size, dtype=REAL)
if (not self.sg):
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
for i in range(steps):
if self.sg:
train_document_dbow(self, doc_words, doctag_indexes, alpha, work, learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
elif self.dm_concat:
train_document_dm_concat(self, doc_words, doctag_indexes, alpha, work, neu1, learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
else:
train_document_dm(self, doc_words, doctag_indexes, alpha, work, neu1, learn_words=False, learn_hidden=False, doctag_vectors=doctag_vectors, doctag_locks=doctag_locks)
alpha = (((alpha - min_alpha) / (steps - i)) + min_alpha)
return doctag_vectors[0]
|
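The loop above decays the learning rate from `alpha` towards `min_alpha` between steps; a standalone trace of that schedule with the default arguments (purely illustrative, no training involved):

alpha, min_alpha, steps = 0.1, 0.0001, 5
for i in range(steps):
    # ... one pass of train_document_* would run here with the current alpha ...
    alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
    print('after step %d: alpha = %.5f' % (i, alpha))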
'Estimate required memory for a model using current settings.'
| def estimate_memory(self, vocab_size=None, report=None):
| report = (report or {})
report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
report['doctag_syn0'] = ((self.docvecs.count * self.vector_size) * dtype(REAL).itemsize)
return super(Doc2Vec, self).estimate_memory(vocab_size, report=report)
|
'Abbreviated name reflecting major configuration parameters.'
| def __str__(self):
| segments = []
if self.comment:
segments.append(('"%s"' % self.comment))
if self.sg:
if self.dbow_words:
segments.append('dbow+w')
else:
segments.append('dbow')
elif self.dm_concat:
segments.append('dm/c')
elif self.cbow_mean:
segments.append('dm/m')
else:
segments.append('dm/s')
segments.append(('d%d' % self.vector_size))
if self.negative:
segments.append(('n%d' % self.negative))
if self.hs:
segments.append('hs')
if ((not self.sg) or (self.sg and self.dbow_words)):
segments.append(('w%d' % self.window))
if (self.min_count > 1):
segments.append(('mc%d' % self.min_count))
if (self.sample > 0):
segments.append(('s%g' % self.sample))
if (self.workers > 1):
segments.append(('t%d' % self.workers))
return ('%s(%s)' % (self.__class__.__name__, ','.join(segments)))
|
'Discard parameters that are used in training and scoring. Use if you\'re sure you\'re done training a model.
Set `keep_doctags_vectors` to False if you don\'t want to save doctags vectors;
in this case you can\'t use docvecs\'s most_similar, similarity etc. methods.
Set `keep_inference` to False if you don\'t want to store parameters that are used for the infer_vector method'
| def delete_temporary_training_data(self, keep_doctags_vectors=True, keep_inference=True):
| if (not keep_inference):
self._minimize_model(False, False, False)
if (self.docvecs and hasattr(self.docvecs, 'doctag_syn0') and (not keep_doctags_vectors)):
del self.docvecs.doctag_syn0
if (self.docvecs and hasattr(self.docvecs, 'doctag_syn0_lockf')):
del self.docvecs.doctag_syn0_lockf
|
'Store the input-hidden weight matrix.
`fname` is the file used to save the vectors in
`doctag_vec` is an optional boolean indicating whether to store document vectors
`word_vec` is an optional boolean indicating whether to store word vectors
(if both doctag_vec and word_vec are True, then both vectors are stored in the same file)
`prefix` to uniquely identify doctags from word vocab, and avoid collision
in case of repeated string in doctag and word vocab
`fvocab` is an optional file used to save the vocabulary
`binary` is an optional boolean indicating whether the data is to be saved
in binary word2vec format (default: False)'
| def save_word2vec_format(self, fname, doctag_vec=False, word_vec=True, prefix='*dt_', fvocab=None, binary=False):
| total_vec = (len(self.wv.vocab) + len(self.docvecs))
if word_vec:
if (not doctag_vec):
total_vec = len(self.wv.vocab)
KeyedVectors.save_word2vec_format(self.wv, fname, fvocab, binary, total_vec)
if doctag_vec:
with utils.smart_open(fname, 'ab') as fout:
if (not word_vec):
total_vec = len(self.docvecs)
logger.info(('storing %sx%s projection weights into %s' % (total_vec, self.vector_size, fname)))
fout.write(utils.to_utf8(('%s %s\n' % (total_vec, self.vector_size))))
for i in range(len(self.docvecs)):
doctag = (prefix + str(self.docvecs.index_to_doctag(i)))
row = self.docvecs.doctag_syn0[i]
if binary:
fout.write(((utils.to_utf8(doctag) + ' ') + row.tostring()))
else:
fout.write(utils.to_utf8(('%s %s\n' % (doctag, ' '.join((('%f' % val) for val in row))))))
|
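A hedged round-trip sketch: store word vectors and prefixed doctag vectors in one word2vec-format file, then read them back as plain keyed vectors. The training data is invented and the `KeyedVectors` import path is assumed for this gensim version:

from gensim.models import KeyedVectors
from gensim.models.doc2vec import Doc2Vec, TaggedDocument

docs = [TaggedDocument(['alpha', 'beta', 'gamma'], ['D0']),
        TaggedDocument(['beta', 'gamma', 'delta'], ['D1'])]
model = Doc2Vec(docs, size=10, min_count=1, iter=5)

# Word vectors plus doc vectors in one plain-text file; doctags get the '*dt_' prefix.
model.save_word2vec_format('/tmp/doc2vec_vectors.txt', doctag_vec=True, word_vec=True, binary=False)

wv = KeyedVectors.load_word2vec_format('/tmp/doc2vec_vectors.txt', binary=False)
print('*dt_D0' in wv.vocab)   # True: the prefixed doctag shows up as an ordinary key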
'`source` can be either a string (filename) or a file object.
Example::
documents = TaggedLineDocument(\'myfile.txt\')
Or for compressed files::
documents = TaggedLineDocument(\'compressed_text.txt.bz2\')
documents = TaggedLineDocument(\'compressed_text.txt.gz\')'
| def __init__(self, source):
| self.source = source
|
'Iterate through the lines in the source.'
| def __iter__(self):
| try:
self.source.seek(0)
for (item_no, line) in enumerate(self.source):
(yield TaggedDocument(utils.to_unicode(line).split(), [item_no]))
except AttributeError:
with utils.smart_open(self.source) as fin:
for (item_no, line) in enumerate(fin):
(yield TaggedDocument(utils.to_unicode(line).split(), [item_no]))
|
'Load a previously saved object from file (also see `save`).
If the object was saved with large arrays stored separately, you can load
these arrays via mmap (shared memory) using `mmap=\'r\'`. Default: don\'t use
mmap, load large arrays as normal objects.
If the file being loaded is compressed (either \'.gz\' or \'.bz2\'), then
`mmap=None` must be set. Load will raise an `IOError` if this condition
is encountered.'
| @classmethod
def load(cls, fname, mmap=None):
| logger.info(('loading %s object from %s' % (cls.__name__, fname)))
(compress, subname) = SaveLoad._adapt_by_suffix(fname)
obj = unpickle(fname)
obj._load_specials(fname, mmap, compress, subname)
logger.info('loaded %s', fname)
return obj
|
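The `load`/`save` pair above comes from gensim's SaveLoad base class, so the same pattern applies to any gensim model; a small sketch using a Dictionary (the file path is illustrative):

from gensim.corpora import Dictionary

dictionary = Dictionary([['red', 'green', 'blue'], ['green', 'blue', 'cyan']])
dictionary.save('/tmp/colors.dict')              # handled by the save/_smart_save machinery below

loaded = Dictionary.load('/tmp/colors.dict')     # plain load
print(loaded.token2id == dictionary.token2id)    # True

# For objects saved with large arrays stored separately, mmap='r' maps those arrays read-only:
# big_model = SomeModel.load('/tmp/some.model', mmap='r')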
'Loads any attributes that were stored specially, and gives the same
opportunity to recursively included SaveLoad instances.'
| def _load_specials(self, fname, mmap, compress, subname):
| mmap_error = (lambda x, y: IOError((('Cannot mmap compressed object %s in file %s. ' % (x, y)) + 'Use `load(fname, mmap=None)` or uncompress files manually.')))
for attrib in getattr(self, '__recursive_saveloads', []):
cfname = '.'.join((fname, attrib))
logger.info(('loading %s recursively from %s.* with mmap=%s' % (attrib, cfname, mmap)))
getattr(self, attrib)._load_specials(cfname, mmap, compress, subname)
for attrib in getattr(self, '__numpys', []):
logger.info(('loading %s from %s with mmap=%s' % (attrib, subname(fname, attrib), mmap)))
if compress:
if mmap:
raise mmap_error(attrib, subname(fname, attrib))
val = np.load(subname(fname, attrib))['val']
else:
val = np.load(subname(fname, attrib), mmap_mode=mmap)
setattr(self, attrib, val)
for attrib in getattr(self, '__scipys', []):
logger.info(('loading %s from %s with mmap=%s' % (attrib, subname(fname, attrib), mmap)))
sparse = unpickle(subname(fname, attrib))
if compress:
if mmap:
raise mmap_error(attrib, subname(fname, attrib))
with np.load(subname(fname, attrib, 'sparse')) as f:
sparse.data = f['data']
sparse.indptr = f['indptr']
sparse.indices = f['indices']
else:
sparse.data = np.load(subname(fname, attrib, 'data'), mmap_mode=mmap)
sparse.indptr = np.load(subname(fname, attrib, 'indptr'), mmap_mode=mmap)
sparse.indices = np.load(subname(fname, attrib, 'indices'), mmap_mode=mmap)
setattr(self, attrib, sparse)
for attrib in getattr(self, '__ignoreds', []):
logger.info(('setting ignored attribute %s to None' % attrib))
setattr(self, attrib, None)
|
'Give appropriate compress setting and filename formula'
| @staticmethod
def _adapt_by_suffix(fname):
| if (fname.endswith('.gz') or fname.endswith('.bz2')):
compress = True
subname = (lambda *args: '.'.join((list(args) + ['npz'])))
else:
compress = False
subname = (lambda *args: '.'.join((list(args) + ['npy'])))
return (compress, subname)
|
'Save the object to file (also see `load`).
If `separately` is None, automatically detect large
numpy/scipy.sparse arrays in the object being stored, and store
them into separate files. This avoids pickle memory errors and
allows mmap\'ing large arrays back on load efficiently.
You can also set `separately` manually, in which case it must be
a list of attribute names to be stored in separate files. The
automatic check is not performed in this case.
`ignore` is a set of attribute names to *not* serialize (file
handles, caches etc). On subsequent load() these attributes will
be set to None.
`pickle_protocol` defaults to 2 so the pickled object can be imported
in both Python 2 and 3.'
| def _smart_save(self, fname, separately=None, sep_limit=(10 * (1024 ** 2)), ignore=frozenset(), pickle_protocol=2):
| logger.info(('saving %s object under %s, separately %s' % (self.__class__.__name__, fname, separately)))
(compress, subname) = SaveLoad._adapt_by_suffix(fname)
restores = self._save_specials(fname, separately, sep_limit, ignore, pickle_protocol, compress, subname)
try:
pickle(self, fname, protocol=pickle_protocol)
finally:
for (obj, asides) in restores:
for (attrib, val) in iteritems(asides):
setattr(obj, attrib, val)
logger.info('saved %s', fname)
|
'Save aside any attributes that need to be handled separately, including
by recursion any attributes that are themselves SaveLoad instances.
Returns a list of (obj, {attrib: value, ...}) settings that the caller
should use to restore each object\'s attributes that were set aside
during the default pickle().'
| def _save_specials(self, fname, separately, sep_limit, ignore, pickle_protocol, compress, subname):
| asides = {}
sparse_matrices = (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)
if (separately is None):
separately = []
for (attrib, val) in iteritems(self.__dict__):
if (isinstance(val, np.ndarray) and (val.size >= sep_limit)):
separately.append(attrib)
elif (isinstance(val, sparse_matrices) and (val.nnz >= sep_limit)):
separately.append(attrib)
for attrib in (separately + list(ignore)):
if hasattr(self, attrib):
asides[attrib] = getattr(self, attrib)
delattr(self, attrib)
recursive_saveloads = []
restores = []
for (attrib, val) in iteritems(self.__dict__):
if hasattr(val, '_save_specials'):
recursive_saveloads.append(attrib)
cfname = '.'.join((fname, attrib))
restores.extend(val._save_specials(cfname, None, sep_limit, ignore, pickle_protocol, compress, subname))
try:
(numpys, scipys, ignoreds) = ([], [], [])
for (attrib, val) in iteritems(asides):
if (isinstance(val, np.ndarray) and (attrib not in ignore)):
numpys.append(attrib)
logger.info(("storing np array '%s' to %s" % (attrib, subname(fname, attrib))))
if compress:
np.savez_compressed(subname(fname, attrib), val=np.ascontiguousarray(val))
else:
np.save(subname(fname, attrib), np.ascontiguousarray(val))
elif (isinstance(val, (scipy.sparse.csr_matrix, scipy.sparse.csc_matrix)) and (attrib not in ignore)):
scipys.append(attrib)
logger.info(("storing scipy.sparse array '%s' under %s" % (attrib, subname(fname, attrib))))
if compress:
np.savez_compressed(subname(fname, attrib, 'sparse'), data=val.data, indptr=val.indptr, indices=val.indices)
else:
np.save(subname(fname, attrib, 'data'), val.data)
np.save(subname(fname, attrib, 'indptr'), val.indptr)
np.save(subname(fname, attrib, 'indices'), val.indices)
(data, indptr, indices) = (val.data, val.indptr, val.indices)
(val.data, val.indptr, val.indices) = (None, None, None)
try:
pickle(val, subname(fname, attrib), protocol=pickle_protocol)
finally:
(val.data, val.indptr, val.indices) = (data, indptr, indices)
else:
logger.info(('not storing attribute %s' % attrib))
ignoreds.append(attrib)
self.__dict__['__numpys'] = numpys
self.__dict__['__scipys'] = scipys
self.__dict__['__ignoreds'] = ignoreds
self.__dict__['__recursive_saveloads'] = recursive_saveloads
except:
for (attrib, val) in iteritems(asides):
setattr(self, attrib, val)
raise
return (restores + [(self, asides)])
|
'Save the object to file (also see `load`).
`fname_or_handle` is either a string specifying the file name to
save to, or an open file-like object which can be written to. If
the object is a file handle, no special array handling will be
performed; all attributes will be saved to the same file.
If `separately` is None, automatically detect large
numpy/scipy.sparse arrays in the object being stored, and store
them into separate files. This avoids pickle memory errors and
allows mmap\'ing large arrays back on load efficiently.
You can also set `separately` manually, in which case it must be
a list of attribute names to be stored in separate files. The
automatic check is not performed in this case.
`ignore` is a set of attribute names to *not* serialize (file
handles, caches etc). On subsequent load() these attributes will
be set to None.
`pickle_protocol` defaults to 2 so the pickled object can be imported
in both Python 2 and 3.'
| def save(self, fname_or_handle, separately=None, sep_limit=(10 * (1024 ** 2)), ignore=frozenset(), pickle_protocol=2):
| try:
_pickle.dump(self, fname_or_handle, protocol=pickle_protocol)
logger.info(('saved %s object' % self.__class__.__name__))
except TypeError:
self._smart_save(fname_or_handle, separately, sep_limit, ignore, pickle_protocol=pickle_protocol)
|
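A hedged sketch of the `separately`/`ignore` behaviour described above, using a toy SaveLoad subclass (class name and file path are invented):

import numpy as np
from gensim.utils import SaveLoad

class Holder(SaveLoad):
    def __init__(self):
        self.big = np.zeros((1000, 1000), dtype=np.float32)   # forced into its own .npy file below
        self.cache = {'expensive': 'stuff'}                   # not serialized at all

obj = Holder()
obj.save('/tmp/holder.model', separately=['big'], ignore=frozenset(['cache']))

restored = Holder.load('/tmp/holder.model', mmap='r')   # the big array comes back memory-mapped
print(restored.cache)        # None -- ignored attributes are reset on load
print(restored.big.shape)    # (1000, 1000)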
'Override the dict.keys() function, which is used to determine the maximum
internal id of a corpus = the vocabulary dimensionality.
HACK: To avoid materializing the whole `range(0, self.num_terms)`, this returns
the highest id = `[self.num_terms - 1]` only.'
| def keys(self):
| return [(self.num_terms - 1)]
|
'Wrap a `corpus` as another corpus of length `reps`. This is achieved by
repeating documents from `corpus` over and over again, until the requested
length `len(result)==reps` is reached. Repetition is done
on-the-fly=efficiently, via `itertools`.
>>> corpus = [[(1, 0.5)], []] # 2 documents
>>> list(RepeatCorpus(corpus, 5)) # repeat 2.5 times to get 5 documents
[[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)]]'
| def __init__(self, corpus, reps):
| self.corpus = corpus
self.reps = reps
|
'Repeat a `corpus` `n` times.
>>> corpus = [[(1, 0.5)], []]
>>> list(RepeatCorpusNTimes(corpus, 3)) # repeat 3 times
[[(1, 0.5)], [], [(1, 0.5)], [], [(1, 0.5)], []]'
| def __init__(self, corpus, n):
| self.corpus = corpus
self.n = n
|
'Return a corpus that is the "head" of input iterable `corpus`.
Any documents after `max_docs` are ignored. This effectively limits the
length of the returned corpus to <= `max_docs`. Set `max_docs=None` for
"no limit", effectively wrapping the entire input corpus.'
| def __init__(self, corpus, max_docs=None):
| self.corpus = corpus
self.max_docs = max_docs
|
'Return a corpus that is the slice of input iterable `corpus`.
Negative slicing can only be used if the corpus is indexable.
Otherwise, the corpus will be iterated over.
Slice can also be a np.ndarray to support fancy indexing.
NOTE: calculating the size of a SlicedCorpus is expensive
when using a slice as the corpus has to be iterated over once.
Using a list or np.ndarray does not have this drawback, but
consumes more memory.'
| def __init__(self, corpus, slice_):
| self.corpus = corpus
self.slice_ = slice_
self.length = None
|
'Return number of docs the word occurs in, once `accumulate` has been called.'
| def get_occurrences(self, word_id):
| return self._get_occurrences(self.id2contiguous[word_id])
|
'Return number of docs the words co-occur in, once `accumulate` has been called.'
| def get_co_occurrences(self, word_id1, word_id2):
| return self._get_co_occurrences(self.id2contiguous[word_id1], self.id2contiguous[word_id2])
|
'Return number of docs the word occurs in, once `accumulate` has been called.'
| def get_occurrences(self, word):
| try:
word_id = self.token2id[word]
except KeyError:
word_id = word
return self._get_occurrences(self.id2contiguous[word_id])
|
'Return number of docs the words co-occur in, once `accumulate` has been called.'
| def get_co_occurrences(self, word1, word2):
| word_id1 = self._word2_contiguous_id(word1)
word_id2 = self._word2_contiguous_id(word2)
return self._get_co_occurrences(word_id1, word_id2)
|