'Args: relevant_ids: the set of words that occurrences should be accumulated for. dictionary: Dictionary instance with mappings for the relevant_ids.'
def __init__(self, relevant_ids, dictionary):
    super(WindowedTextsAnalyzer, self).__init__(relevant_ids, dictionary)
    self._none_token = self._vocab_size
'Return True if the text has any relevant words, else False.'
def text_is_relevant(self, text):
    for word in text:
        if word in self.relevant_words:
            return True
    return False
'Meant to be called several times to accumulate partial results. The final accumulation should be performed with the `accumulate` method as opposed to this one. This method does not ensure the co-occurrence matrix is in lil format and does not symmetrize it after accumulation.'
def partial_accumulate(self, texts, window_size):
    self._current_doc_num = -1
    self._token_at_edge = None
    self._counter.clear()

    super(WordOccurrenceAccumulator, self).accumulate(texts, window_size)
    for combo, count in viewitems(self._counter):
        self._co_occurrences[combo] += count

    return self
'Word pairs may have been encountered in (i, j) and (j, i) order. Rather than enforcing a particular ordering during the update process, we choose to symmetrize the co-occurrence matrix after accumulation has completed.'
def _symmetrize(self):
    co_occ = self._co_occurrences
    co_occ.setdiag(self._occurrences)
    self._co_occurrences = co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')
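
A minimal standalone sketch (toy matrix, not part of the accumulator code) of why the expression above yields a symmetric matrix: adding the transpose doubles the diagonal, so the original diagonal is subtracted back out with sps.diags.

import numpy as np
import scipy.sparse as sps

# upper-triangular co-occurrence counts, with plain occurrence counts on the diagonal
co_occ = sps.lil_matrix(np.array([[5, 2, 1],
                                  [0, 4, 3],
                                  [0, 0, 6]], dtype='uint32'))

symmetric = co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')
print(symmetric.toarray())
# [[5 2 1]
#  [2 4 3]
#  [1 3 6]]
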
'Args: processes : number of processes to use; must be at least two. args : should include `relevant_ids` and `dictionary` (see `UsesDictionary.__init__`). kwargs : can include `batch_size`, which is the number of docs to send to a worker at a time. If not included, it defaults to 64.'
def __init__(self, processes, *args, **kwargs):
    super(ParallelWordOccurrenceAccumulator, self).__init__(*args)
    if processes < 2:
        raise ValueError('Must have at least 2 processes to run in parallel; got %d' % processes)
    self.processes = processes
    self.batch_size = kwargs.get('batch_size', 64)
'Set up an input and output queue and start processes for each worker. The input queue is used to transmit batches of documents to the workers. The output queue is used by workers to transmit the WordOccurrenceAccumulator instances. Returns: tuple of (list of workers, input queue, output queue).'
def start_workers(self, window_size):
    input_q = mp.Queue(maxsize=self.processes)
    output_q = mp.Queue()
    workers = []
    for _ in range(self.processes):
        accumulator = PatchedWordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
        worker = AccumulatingWorker(input_q, output_q, accumulator, window_size)
        worker.start()
        workers.append(worker)

    return workers, input_q, output_q
'Return a generator over the given texts that yields batches of `batch_size` texts at a time.'
def yield_batches(self, texts):
    batch = []
    for text in self._iter_texts(texts):
        batch.append(text)
        if len(batch) == self.batch_size:
            yield batch
            batch = []

    if batch:
        yield batch
'Sequentially place batches of texts on the given queue until `texts` is consumed. The texts are filtered so that only those with at least one relevant token are queued.'
def queue_all_texts(self, q, texts, window_size):
    for batch_num, batch in enumerate(self.yield_batches(texts)):
        q.put(batch, block=True)
        before = self._num_docs / self.log_every
        self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)
        if before < self._num_docs / self.log_every:
            logger.info(
                '%d batches submitted to accumulate stats from %d documents (%d virtual)',
                batch_num + 1, (batch_num + 1) * self.batch_size, self._num_docs)
'Wait until all workers have transmitted their WordOccurrenceAccumulator instances, then terminate each. We do not use join here because it has been shown to have some issues in Python 2.7 (and even in later versions). This method also closes both the input and output queue. If `interrupted` is False (normal execution), a None value is placed on the input queue for each worker. The workers are looking for this sentinel value and interpret it as a signal to terminate themselves. If `interrupted` is True, a KeyboardInterrupt occurred. The workers are programmed to recover from this and continue on to transmit their results before terminating. So in this instance, the sentinel values are not queued, but the rest of the execution continues as usual.'
def terminate_workers(self, input_q, output_q, workers, interrupted=False):
    if not interrupted:
        for _ in workers:
            input_q.put(None, block=True)

    accumulators = []
    while len(accumulators) != len(workers):
        accumulators.append(output_q.get())
    logger.info('%d accumulators retrieved from output queue', len(accumulators))

    for worker in workers:
        if worker.is_alive():
            worker.terminate()

    input_q.close()
    output_q.close()
    return accumulators
'Merge the list of accumulators into a single `WordOccurrenceAccumulator` with all occurrence and co-occurrence counts, and a `num_docs` that reflects the total observed by all the individual accumulators.'
def merge_accumulators(self, accumulators):
    accumulator = WordOccurrenceAccumulator(self.relevant_ids, self.dictionary)
    for other_accumulator in accumulators:
        accumulator.merge(other_accumulator)
    # partial accumulation in the workers leaves the co-occurrence matrix unsymmetrized,
    # so symmetrize once here, after all partial results have been merged
    accumulator._symmetrize()
    logger.info('accumulated word occurrence stats for %d virtual documents', accumulator.num_docs)
    return accumulator
'Step 1. Check if (segment_word_ids, topic_word_ids) context vector has been cached. Step 2. If yes, return corresponding context vector, else compute, cache, and return.'
def compute_context_vector(self, segment_word_ids, topic_word_ids):
    key = _key_for_segment(segment_word_ids, topic_word_ids)
    context_vector = self.context_vector_cache.get(key, None)
    if context_vector is None:
        context_vector = self._make_seg(segment_word_ids, topic_word_ids)
        self.context_vector_cache[key] = context_vector
    return context_vector
'Internal helper function to return context vectors for segmentations.'
def _make_seg(self, segment_word_ids, topic_word_ids):
    context_vector = sps.lil_matrix((self.vocab_size, 1))
    if not hasattr(segment_word_ids, '__iter__'):
        segment_word_ids = (segment_word_ids,)

    for w_j in topic_word_ids:
        idx = (self.mapping[w_j], 0)
        for pair in (tuple(sorted((w_i, w_j))) for w_i in segment_word_ids):
            if pair not in self.sim_cache:
                self.sim_cache[pair] = self.similarity(pair, self.accumulator)
            context_vector[idx] += self.sim_cache[pair] ** self.gamma

    return context_vector.tocsr()
'Build an Annoy index using word vectors from a Word2Vec model'
def build_from_word2vec(self):
    self.model.init_sims()
    return self._build_from_model(self.model.wv.syn0norm, self.model.wv.index2word, self.model.vector_size)
'Build an Annoy index using document vectors from a Doc2Vec model'
def build_from_doc2vec(self):
    docvecs = self.model.docvecs
    docvecs.init_sims()
    labels = [docvecs.index_to_doctag(i) for i in range(0, docvecs.count)]
    return self._build_from_model(docvecs.doctag_syn0norm, labels, self.model.vector_size)
'Build an Annoy index using word vectors from a KeyedVectors model'
def build_from_keyedvectors(self):
    self.model.init_sims()
    return self._build_from_model(self.model.syn0norm, self.model.index2word, self.model.vector_size)
'Find the top-N most similar items'
def most_similar(self, vector, num_neighbors):
    ids, distances = self.index.get_nns_by_vector(vector, num_neighbors, include_distances=True)
    return [(self.labels[ids[i]], 1 - distances[i] / 2) for i in range(len(ids))]
'Return the index vector at position `pos`. The vector is of the same type as the underlying index (i.e., dense for MatrixSimilarity and scipy.sparse for SparseMatrixSimilarity).'
def get_document_id(self, pos):
    assert 0 <= pos < len(self), 'requested position out of range'
    return self.get_index().index[pos]
'Construct the index from `corpus`. The index can be later extended by calling the `add_documents` method. **Note**: documents are split (internally, transparently) into shards of `shardsize` documents each, converted to a matrix, for faster BLAS calls. Each shard is stored to disk under `output_prefix.shard_number` (=you need write access to that location). If you don\'t specify an output prefix, a random filename in temp will be used. `shardsize` should be chosen so that a `shardsize x chunksize` matrix of floats fits comfortably into main memory. `num_features` is the number of features in the `corpus` (e.g. size of the dictionary, or the number of latent topics for latent semantic models). `norm` is the user-chosen normalization to use. Accepted values are: \'l1\' and \'l2\'. If `num_best` is left unspecified, similarity queries will return a full vector with one float for every document in the index: >>> index = Similarity(\'/path/to/index\', corpus, num_features=400) # if corpus has 7 documents... >>> index[query] # ... then result will have 7 floats [0.0, 0.0, 0.2, 0.13, 0.8, 0.0, 0.1] If `num_best` is set, queries return only the `num_best` most similar documents, always leaving out documents for which the similarity is 0. If the input vector itself only has features with zero values (=the sparse representation is empty), the returned list will always be empty. >>> index.num_best = 3 >>> index[query] # return at most "num_best" of `(index_of_document, similarity)` tuples [(4, 0.8), (2, 0.13), (3, 0.13)] You can also override `num_best` dynamically, simply by setting e.g. `self.num_best = 10` before doing a query.'
def __init__(self, output_prefix, corpus, num_features, num_best=None, chunksize=256, shardsize=32768, norm='l2'):
    if output_prefix is None:
        self.output_prefix = utils.randfname(prefix='simserver')
    else:
        self.output_prefix = output_prefix

    logger.info('starting similarity index under %s', self.output_prefix)
    self.num_features = num_features
    self.num_best = num_best
    self.norm = norm
    self.chunksize = int(chunksize)
    self.shardsize = shardsize
    self.shards = []
    self.fresh_docs, self.fresh_nnz = [], 0

    if corpus is not None:
        self.add_documents(corpus)
'Extend the index with new documents. Internally, documents are buffered and then spilled to disk when there\'s `self.shardsize` of them (or when a query is issued).'
def add_documents(self, corpus):
    min_ratio = 1.0
    if self.shards and len(self.shards[-1]) < min_ratio * self.shardsize:
        self.reopen_shard()
    for doc in corpus:
        if isinstance(doc, numpy.ndarray):
            doclen = len(doc)
        elif scipy.sparse.issparse(doc):
            doclen = doc.nnz
        else:
            doclen = len(doc)
            if doclen < 0.3 * self.num_features:
                doc = matutils.unitvec(matutils.corpus2csc([doc], self.num_features).T, self.norm)
            else:
                doc = matutils.unitvec(matutils.sparse2full(doc, self.num_features), self.norm)
        self.fresh_docs.append(doc)
        self.fresh_nnz += doclen
        if len(self.fresh_docs) >= self.shardsize:
            self.close_shard()
        if len(self.fresh_docs) % 10000 == 0:
            logger.info('PROGRESS: fresh_shard size=%i', len(self.fresh_docs))
'Force the latest shard to close (be converted to a matrix and stored to disk). Do nothing if no new documents added since last call. **NOTE**: the shard is closed even if it is not full yet (its size is smaller than `self.shardsize`). If documents are added later via `add_documents()`, this incomplete shard will be loaded again and completed.'
def close_shard(self):
    if not self.fresh_docs:
        return
    shardid = len(self.shards)
    # treat the shard as sparse if its density is below 30%
    issparse = 0.3 > 1.0 * self.fresh_nnz / (len(self.fresh_docs) * self.num_features)
    if issparse:
        index = SparseMatrixSimilarity(
            self.fresh_docs, num_terms=self.num_features,
            num_docs=len(self.fresh_docs), num_nnz=self.fresh_nnz)
    else:
        index = MatrixSimilarity(self.fresh_docs, num_features=self.num_features)
    logger.info('creating %s shard #%s', 'sparse' if issparse else 'dense', shardid)
    shard = Shard(self.shardid2filename(shardid), index)
    shard.num_best = self.num_best
    shard.num_nnz = self.fresh_nnz
    self.shards.append(shard)
    self.fresh_docs, self.fresh_nnz = [], 0
'Return the result of applying shard[query] for each shard in self.shards, as a sequence. If PARALLEL_SHARDS is set, the shards are queried in parallel, using the multiprocessing module.'
def query_shards(self, query):
    args = zip([query] * len(self.shards), self.shards)
    if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
        logger.debug('spawning %i query processes', PARALLEL_SHARDS)
        pool = multiprocessing.Pool(PARALLEL_SHARDS)
        result = pool.imap(query_shard, args, chunksize=1 + len(args) / PARALLEL_SHARDS)
    else:
        pool = None
        result = imap(query_shard, args)
    return pool, result
'Get similarities of document `query` to all documents in the corpus. **or** If `query` is a corpus (iterable of documents), return a matrix of similarities of all query documents vs. all corpus documents. This batch query is more efficient than computing the similarities one document after another.'
def __getitem__(self, query):
    self.close_shard()  # no-op if no new documents were added since the last query

    # apply the current num_best / normalization settings to all shards
    for shard in self.shards:
        shard.num_best = self.num_best
        shard.normalize = self.norm

    pool, shard_results = self.query_shards(query)
    if self.num_best is None:
        # full similarity vectors requested => simply stack the per-shard results
        result = numpy.hstack(shard_results)
    else:
        # only num_best results requested => merge the per-shard top results,
        # shifting document indices by each shard's offset within the index
        offsets = numpy.cumsum([0] + [len(shard) for shard in self.shards])
        convert = lambda doc, shard_no: [(doc_index + offsets[shard_no], sim) for doc_index, sim in doc]

        is_corpus, query = utils.is_corpus(query)
        is_corpus = is_corpus or (hasattr(query, 'ndim') and query.ndim > 1 and query.shape[0] > 1)
        if not is_corpus:
            # single query document
            results = (convert(result, shard_no) for shard_no, result in enumerate(shard_results))
            result = heapq.nlargest(self.num_best, itertools.chain(*results), key=lambda item: item[1])
        else:
            # batch query: merge the shard results document by document
            results = []
            for shard_no, result in enumerate(shard_results):
                shard_result = [convert(doc, shard_no) for doc in result]
                results.append(shard_result)
            result = []
            for parts in izip(*results):
                merged = heapq.nlargest(self.num_best, itertools.chain(*parts), key=lambda item: item[1])
                result.append(merged)
    if pool:
        # terminate the multiprocessing pool explicitly, rather than waiting for gc
        pool.terminate()

    return result
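
As a standalone illustration (toy data in plain Python/numpy, not the gensim API), the offset-and-merge step above boils down to: shift each shard's local document indices by the number of documents in the preceding shards, then keep the num_best largest similarities across all shards with heapq.nlargest.

import heapq
import itertools

import numpy

# per-shard results for a single query: lists of (local_doc_index, similarity)
shard_results = [
    [(0, 0.91), (3, 0.40)],  # shard 0 holds 4 documents
    [(1, 0.75), (2, 0.66)],  # shard 1 holds 3 documents
]
shard_lengths = [4, 3]
num_best = 3

offsets = numpy.cumsum([0] + shard_lengths)
convert = lambda docs, shard_no: [(idx + offsets[shard_no], sim) for idx, sim in docs]

results = (convert(docs, shard_no) for shard_no, docs in enumerate(shard_results))
merged = heapq.nlargest(num_best, itertools.chain(*results), key=lambda item: item[1])
print(merged)  # -> [(0, 0.91), (5, 0.75), (6, 0.66)], indices are now global
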
'Return indexed vector corresponding to the document at position `docpos`.'
def vector_by_id(self, docpos):
    self.close_shard()
    pos = 0
    for shard in self.shards:
        pos += len(shard)
        if docpos < pos:
            break
    if not self.shards or docpos < 0 or docpos >= pos:
        raise ValueError('invalid document position: %s (must be 0 <= x < %s)' % (docpos, len(self)))
    result = shard.get_document_id(docpos - pos + len(shard))
    return result
'Return similarity of the given document only. `docpos` is the position of the query document within index.'
def similarity_by_id(self, docpos):
    query = self.vector_by_id(docpos)
    norm, self.norm = self.norm, False
    result = self[query]
    self.norm = norm
    return result
'For each index document, compute cosine similarity against all other documents in the index and yield the result.'
def __iter__(self):
    norm, self.norm = self.norm, False

    for chunk in self.iter_chunks():
        if chunk.shape[0] > 1:
            for sim in self[chunk]:
                yield sim
        else:
            yield self[chunk]

    self.norm = norm
'Iteratively yield the index as chunks of documents, each of size <= chunksize. The chunk is returned in its raw form (matrix or sparse matrix slice). The size of the chunk may be smaller than requested; it is up to the caller to check the result for real length, using `chunk.shape[0]`.'
def iter_chunks(self, chunksize=None):
    self.close_shard()

    if chunksize is None:
        chunksize = self.chunksize

    for shard in self.shards:
        query = shard.get_index().index
        for chunk_start in xrange(0, query.shape[0], chunksize):
            chunk_end = min(query.shape[0], chunk_start + chunksize)
            chunk = query[chunk_start:chunk_end]
            yield chunk
'Update shard locations, in case the server directory has moved on filesystem.'
def check_moved(self):
    dirname = os.path.dirname(self.output_prefix)
    for shard in self.shards:
        shard.dirname = dirname
'Save the object via pickling (also see load) under filename specified in the constructor. Calls `close_shard` internally to spill any unfinished shards to disk first.'
def save(self, fname=None, *args, **kwargs):
    self.close_shard()
    if fname is None:
        fname = self.output_prefix
    super(Similarity, self).save(fname, *args, **kwargs)
'Delete all files under self.output_prefix. Object is not usable after calling this method anymore. Use with care!'
def destroy(self):
    import glob
    for fname in glob.glob(self.output_prefix + '*'):
        logger.info('deleting %s', fname)
        os.remove(fname)
'`num_features` is the number of features in the corpus (will be determined automatically by scanning the corpus if not specified). See `Similarity` class for description of the other parameters.'
def __init__(self, corpus, num_best=None, dtype=numpy.float32, num_features=None, chunksize=256, corpus_len=None):
    if num_features is None:
        logger.warning('scanning corpus to determine the number of features (consider setting `num_features` explicitly)')
        num_features = 1 + utils.get_max_id(corpus)

    self.num_features = num_features
    self.num_best = num_best
    self.normalize = True
    self.chunksize = chunksize
    if corpus_len is None:
        corpus_len = len(corpus)

    if corpus is not None:
        if self.num_features <= 0:
            raise ValueError('cannot index a corpus with zero features (you must specify either `num_features` or a non-empty corpus in the constructor)')
        logger.info('creating matrix with %i documents and %i features', corpus_len, num_features)
        self.index = numpy.empty(shape=(corpus_len, num_features), dtype=dtype)
        for docno, vector in enumerate(corpus):
            if docno % 1000 == 0:
                logger.debug('PROGRESS: at document #%i/%i', docno, corpus_len)
            if isinstance(vector, numpy.ndarray):
                pass
            elif scipy.sparse.issparse(vector):
                vector = vector.toarray().flatten()
            else:
                vector = matutils.unitvec(matutils.sparse2full(vector, num_features))
            self.index[docno] = vector
'Return similarity of sparse vector `query` to all documents in the corpus, as a numpy array. If `query` is a collection of documents, return a 2D array of similarities of each document in `query` to all documents in the corpus (=batch query, faster than processing each document in turn). **Do not use this function directly; use the self[query] syntax instead.**'
def get_similarities(self, query):
    is_corpus, query = utils.is_corpus(query)
    if is_corpus:
        query = numpy.asarray(
            [matutils.sparse2full(vec, self.num_features) for vec in query],
            dtype=self.index.dtype)
    else:
        if scipy.sparse.issparse(query):
            query = query.toarray()
        elif isinstance(query, numpy.ndarray):
            pass
        else:
            query = matutils.sparse2full(query, self.num_features)
        query = numpy.asarray(query, dtype=self.index.dtype)

    result = numpy.dot(self.index, query.T).T
    return result
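
A small numpy-only sketch (toy vectors, not the class above) of the idea behind the final numpy.dot: once the index rows and the query are unit-length, a plain dot product is exactly the cosine similarity.

import numpy

def unit(rows):
    # normalize each row to unit L2 length
    return rows / numpy.linalg.norm(rows, axis=1, keepdims=True)

index = unit(numpy.array([[1.0, 0.0], [1.0, 1.0], [0.0, 2.0]]))
query = unit(numpy.array([[1.0, 0.0]]))

sims = numpy.dot(index, query.T).T  # shape: (#queries, #index documents)
print(sims)  # approximately [[1. 0.7071 0.]]
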
'corpus: List of lists of strings, as in gensim.models.word2vec. w2v_model: A trained word2vec model. num_best: Number of results to retrieve. normalize_w2v_and_replace: Whether or not to normalize the word2vec vectors to length 1.'
def __init__(self, corpus, w2v_model, num_best=None, normalize_w2v_and_replace=True, chunksize=256):
    self.corpus = corpus
    self.w2v_model = w2v_model
    self.num_best = num_best
    self.chunksize = chunksize
    self.normalize = False
    self.index = numpy.array(range(len(corpus)))

    if normalize_w2v_and_replace:
        w2v_model.init_sims(replace=True)
'**Do not use this function directly; use the self[query] syntax instead.**'
def get_similarities(self, query):
    if isinstance(query, numpy.ndarray):
        query = [self.corpus[i] for i in query]
    if not isinstance(query[0], list):
        query = [query]

    n_queries = len(query)
    result = []
    for qidx in range(n_queries):
        qresult = [self.w2v_model.wmdistance(document, query[qidx]) for document in self.corpus]
        qresult = numpy.array(qresult)
        qresult = 1.0 / (1.0 + qresult)
        result.append(qresult)

    if len(result) == 1:
        result = result[0]
    else:
        result = numpy.array(result)

    return result
'Return similarity of sparse vector `query` to all documents in the corpus, as a numpy array. If `query` is a collection of documents, return a 2D array of similarities of each document in `query` to all documents in the corpus (=batch query, faster than processing each document in turn). **Do not use this function directly; use the self[query] syntax instead.**'
def get_similarities(self, query):
    is_corpus, query = utils.is_corpus(query)
    if is_corpus:
        query = matutils.corpus2csc(query, self.index.shape[1], dtype=self.index.dtype)
    elif scipy.sparse.issparse(query):
        query = query.T
    elif isinstance(query, numpy.ndarray):
        if query.ndim == 1:
            query.shape = (1, len(query))
        query = scipy.sparse.csr_matrix(query, dtype=self.index.dtype).T
    else:
        query = matutils.corpus2csc([query], self.index.shape[1], dtype=self.index.dtype)

    result = self.index * query.tocsc()
    if result.shape[1] == 1 and not is_corpus:
        result = result.toarray().flatten()
    elif self.maintain_sparsity:
        result = result.T
    else:
        result = result.toarray().T
    return result
'`vecs` is a sequence of dense and/or sparse vectors, such as a 2d np array, or a scipy.sparse.csc_matrix, or any sequence containing a mix of 1d np/scipy vectors.'
def __init__(self, vecs):
    self.vecs = vecs
'Write a single sparse vector to the file. Sparse vector is any iterable yielding (field id, field value) pairs.'
def write_vector(self, docno, vector):
    assert self.headers_written, 'must write Matrix Market file headers before writing data!'
    assert self.last_docno < docno, 'documents %i and %i not in sequential order!' % (self.last_docno, docno)
    vector = sorted((i, w) for i, w in vector if abs(w) > 1e-12)  # ignore near-zero entries
    for termid, weight in vector:  # write term ids in sorted order
        # +1 because Matrix Market indices are 1-based
        self.fout.write(utils.to_utf8('%i %i %s\n' % (docno + 1, termid + 1, weight)))
    self.last_docno = docno
    return (vector[-1][0], len(vector)) if vector else (-1, 0)
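
A hedged, standalone illustration of the format being written (it does not use MmWriter itself): each non-zero entry becomes one "row col value" line, with 1-based row and column indices as required by the Matrix Market coordinate format.

# document 0 (0-based gensim numbering) with two non-zero features
docno = 0
vector = [(2, 0.5), (7, 1.25)]

lines = ['%i %i %s' % (docno + 1, termid + 1, weight) for termid, weight in vector]
for line in lines:
    print(line)
# 1 3 0.5
# 1 8 1.25
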
'Save the vector space representation of an entire corpus to disk. Note that the documents are processed one at a time, so the whole corpus is allowed to be larger than the available RAM.'
@staticmethod
def write_corpus(fname, corpus, progress_cnt=1000, index=False, num_terms=None, metadata=False):
    mw = MmWriter(fname)

    # write placeholder headers; the real statistics are filled in at the end via fake_headers()
    mw.write_headers(-1, -1, -1)

    # calculate the necessary header info (num terms, num docs, nnz elements) while writing out the vectors
    _num_terms, num_nnz = 0, 0
    docno, poslast = -1, -1
    offsets = []
    if hasattr(corpus, 'metadata'):
        orig_metadata = corpus.metadata
        corpus.metadata = metadata
        if metadata:
            docno2metadata = {}
    else:
        metadata = False
    for docno, doc in enumerate(corpus):
        if metadata:
            bow, data = doc
            docno2metadata[docno] = data
        else:
            bow = doc
        if docno % progress_cnt == 0:
            logger.info('PROGRESS: saving document #%i' % docno)
        if index:
            posnow = mw.fout.tell()
            if posnow == poslast:
                offsets[-1] = -1
            offsets.append(posnow)
            poslast = posnow
        max_id, veclen = mw.write_vector(docno, bow)
        _num_terms = max(_num_terms, 1 + max_id)
        num_nnz += veclen
    if metadata:
        utils.pickle(docno2metadata, fname + '.metadata.cpickle')
        corpus.metadata = orig_metadata

    num_docs = docno + 1
    num_terms = num_terms or _num_terms

    if num_docs * num_terms != 0:
        logger.info(
            'saved %ix%i matrix, density=%.3f%% (%i/%i)' % (
                num_docs, num_terms, 100.0 * num_nnz / (num_docs * num_terms), num_nnz, num_docs * num_terms))

    # now overwrite the placeholder headers with the actual statistics
    mw.fake_headers(num_docs, num_terms, num_nnz)

    mw.close()
    if index:
        return offsets
'Automatic destructor which closes the underlying file. There must be no circular references contained in the object for __del__ to work! Closing the file explicitly via the close() method is preferred and safer.'
def __del__(self):
    self.close()
'Initialize the matrix reader. The `input` refers to a file on local filesystem, which is expected to be in the sparse (coordinate) Matrix Market format. Documents are assumed to be rows of the matrix (and document features are columns). `input` is either a string (file path) or a file-like object that supports `seek()` (e.g. gzip.GzipFile, bz2.BZ2File).'
def __init__(self, input, transposed=True):
    logger.info('initializing corpus reader from %s' % input)
    self.input, self.transposed = input, transposed
    with utils.file_or_filename(self.input) as lines:
        try:
            header = utils.to_unicode(next(lines)).strip()
            if not header.lower().startswith('%%matrixmarket matrix coordinate real general'):
                raise ValueError(
                    'File %s not in Matrix Market format with coordinate real general; instead found: \n%s' %
                    (self.input, header))
        except StopIteration:
            pass

        self.num_docs = self.num_terms = self.num_nnz = 0
        for lineno, line in enumerate(lines):
            line = utils.to_unicode(line)
            if not line.startswith('%'):
                self.num_docs, self.num_terms, self.num_nnz = map(int, line.split())
                if not self.transposed:
                    self.num_docs, self.num_terms = self.num_terms, self.num_docs
                break

    logger.info(
        'accepted corpus with %i documents, %i features, %i non-zero entries',
        self.num_docs, self.num_terms, self.num_nnz)
'Skip file headers that appear before the first document.'
def skip_headers(self, input_file):
    for line in input_file:
        if line.startswith('%'):
            continue
        break
'Iteratively yield vectors from the underlying file, in the format (row_no, vector), where vector is a list of (col_no, value) 2-tuples. Note that the total number of vectors returned is always equal to the number of rows specified in the header; empty documents are inserted and yielded where appropriate, even if they are not explicitly stored in the Matrix Market file.'
def __iter__(self):
    with utils.file_or_filename(self.input) as lines:
        self.skip_headers(lines)

        previd = -1
        for line in lines:
            docid, termid, val = utils.to_unicode(line).split()
            if not self.transposed:
                termid, docid = docid, termid
            # -1 because Matrix Market indices are 1-based => convert to 0-based
            docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
            assert previd <= docid, 'matrix columns must come in ascending order'
            if docid != previd:
                # change of document: return the document read so far (its id is previd)
                if previd >= 0:
                    yield previd, document
                # also yield implicit (empty) documents between the previous id and the new id,
                # to keep consistent document numbering and corpus length
                for previd in xrange(previd + 1, docid):
                    yield previd, []
                # start collecting fields for a new document, with a new id
                previd = docid
                document = []
            document.append((termid, val))

    # handle the last document, as a special case
    if previd >= 0:
        yield previd, document

    # yield empty documents between the last explicit document and the number
    # of documents specified in the header
    for previd in xrange(previd + 1, self.num_docs):
        yield previd, []
'Return document at file offset `offset` (in bytes)'
def docbyoffset(self, offset):
    if offset == -1:
        return []
    if isinstance(self.input, string_types):
        fin = utils.smart_open(self.input)
    else:
        fin = self.input

    fin.seek(offset)
    previd, document = -1, []
    for line in fin:
        docid, termid, val = line.split()
        if not self.transposed:
            termid, docid = docid, termid
        docid, termid, val = int(docid) - 1, int(termid) - 1, float(val)
        assert previd <= docid, 'matrix columns must come in ascending order'
        if docid != previd:
            if previd >= 0:
                return document
            previd = docid
        document.append((termid, val))

    return document
'The function that defines a corpus -- iterating over the corpus yields bag-of-words vectors, one for each document. A bag-of-words vector is simply a list of ``(tokenId, tokenCount)`` 2-tuples.'
def __iter__(self):
    for docNo, (sourceId, docUri) in enumerate(self.documents):
        source = self.config.sources[sourceId]
        contents = source.getContent(docUri)
        words = [source.normalizeWord(word) for word in source.tokenize(contents)]
        yield self.dictionary.doc2bow(words, allowUpdate=False)
'Populate dictionary mapping and statistics. This is done by sequentially retrieving the article fulltexts, splitting them into tokens and converting tokens to their ids (creating new ids as necessary).'
def buildDictionary(self):
    logger.info('creating dictionary from %i articles' % len(self.documents))
    self.dictionary = dictionary.Dictionary()
    numPositions = 0
    for docNo, (sourceId, docUri) in enumerate(self.documents):
        if docNo % 1000 == 0:
            logger.info('PROGRESS: at document #%i/%i (%s, %s)' % (docNo, len(self.documents), sourceId, docUri))
        source = self.config.sources[sourceId]
        contents = source.getContent(docUri)
        words = [source.normalizeWord(word) for word in source.tokenize(contents)]
        numPositions += len(words)
        _ = self.dictionary.doc2bow(words, allowUpdate=True)
    logger.info('built %s from %i documents (total %i corpus positions)' % (self.dictionary, len(self.documents), numPositions))
'Parse the directories specified in the config, looking for suitable articles. This updates the self.documents var, which keeps a list of (source id, article uri) 2-tuples. Each tuple is a unique identifier of one article. Note that some articles are ignored based on config settings (for example if the article\'s language doesn\'t match any language specified in the config etc.).'
def processConfig(self, config, shuffle=False):
    self.config = config
    self.documents = []
    logger.info('processing config %s' % config)
    for sourceId, source in config.sources.iteritems():
        logger.info("processing source '%s'" % sourceId)
        accepted = []
        for articleUri in source.findArticles():
            meta = source.getMeta(articleUri)
            if config.acceptArticle(meta):
                accepted.append((sourceId, articleUri))
        logger.info("accepted %i articles for source '%s'" % (len(accepted), sourceId))
        self.documents.extend(accepted)
    if not self.documents:
        logger.warning('no articles at all found from the config; something went wrong!')
    if shuffle:
        logger.info('shuffling %i documents for random order' % len(self.documents))
        import random
        random.shuffle(self.documents)
    logger.info('accepted total of %i articles for %s' % (len(self.documents), str(config)))
'Store the corpus to disk, in a human-readable text format. This actually saves multiple files: 1. Pure document-term co-occurrence frequency counts, as a Matrix Market file. 2. Token to integer mapping, as a text file. 3. Document to document URI mapping, as a text file. The exact filesystem paths and filenames are determined from the config.'
def saveAsText(self):
    self.saveDictionary(self.config.resultFile('wordids.txt'))
    self.saveDocuments(self.config.resultFile('docids.txt'))
    matutils.MmWriter.writeCorpus(self.config.resultFile('bow.mm'), self)
'Return absolute normalized path on filesystem to article no. `docNo`.'
def articleDir(self, docNo):
    sourceId, (_, outPath) = self.documents[docNo]
    source = self.config.sources[sourceId]
    return os.path.join(source.baseDir, outPath)
'Return metadata for article no. `docNo`.'
def getMeta(self, docNo):
    sourceId, uri = self.documents[docNo]
    source = self.config.sources[sourceId]
    return source.getMeta(uri)
'Parse out all fields from meta.xml, return them as a dictionary.'
@classmethod
def parseDmlMeta(cls, xmlfile):
    result = {}
    xml = open(xmlfile)
    for line in xml:
        if line.find('<article>') >= 0:  # skip ahead to the beginning of the <article> element
            break
    for line in xml:
        if line.find('</article>') >= 0:  # end of <article>; we're done
            break
        p = re.search(PAT_TAG, line)
        if p:
            name, cont = p.groups()
            name = name.split()[0]
            name, cont = name.strip(), cont.strip()
            if name == 'msc':
                if len(cont) != 5:
                    logger.warning('invalid MSC=%s in %s' % (cont, xmlfile))
                result.setdefault('msc', []).append(cont)
                continue
            if name == 'idMR':
                cont = cont[2:]  # drop the leading two characters of the identifier
            if name and cont:
                result[name] = cont
    xml.close()
    return result
'Return article content as a single large string.'
def getContent(self, uri):
    intId, pathId = uri
    filename = os.path.join(self.baseDir, pathId, 'fulltext.txt')
    return open(filename).read()
'Return article metadata as an attribute->value dictionary.'
def getMeta(self, uri):
    intId, pathId = uri
    filename = os.path.join(self.baseDir, pathId, 'meta.xml')
    return DmlSource.parseDmlMeta(filename)
'Return article content as a single large string.'
def getContent(self, uri):
    intId, pathId = uri
    filename1 = os.path.join(self.baseDir, pathId, 'fulltext.txt')
    filename2 = os.path.join(self.baseDir, pathId, 'fulltext-dspace.txt')
    if os.path.exists(filename1) and os.path.exists(filename2):
        # if both fulltext files exist, use the larger one
        if os.path.getsize(filename1) < os.path.getsize(filename2):
            filename = filename2
        else:
            filename = filename1
    elif os.path.exists(filename1):
        filename = filename1
    else:
        assert os.path.exists(filename2)
        filename = filename2
    return open(filename).read()
'Return article content as a single large string.'
def getContent(self, uri):
    intId, pathId = uri
    filename = os.path.join(self.baseDir, pathId, 'tex.xml')
    return open(filename).read()
'Return article metadata as an attribute->value dictionary.'
def getMeta(self, uri):
    return {'language': 'eng'}
'Parse tokens out of xml. There are two types of tokens: normal text and mathematics. Both are returned interspersed in a single list, in the same order as they appeared in the content. The math tokens will be returned in the form $tex_expression$, i.e. with a dollar sign prefix and suffix.'
def tokenize(self, content):
    handler = ArxmlivSource.ArxmlivContentHandler()
    xml.sax.parseString(content, handler, ArxmlivSource.ArxmlivErrorHandler())
    return handler.tokens
'Iterate over the corpus, yielding one document at a time.'
def __iter__(self):
    raise NotImplementedError('cannot instantiate abstract base class')
'Return the number of documents in the corpus. This method is just the least common denominator and should really be overridden when possible.'
def __len__(self):
    raise NotImplementedError('must override __len__() before calling len(corpus)')
'Save an existing `corpus` to disk. Some formats also support saving the dictionary (`feature_id->word` mapping), which can in this case be provided by the optional `id2word` parameter. >>> MmCorpus.save_corpus(\'file.mm\', corpus) Some corpora also support an index of where each document begins, so that the documents on disk can be accessed in O(1) time (see the `corpora.IndexedCorpus` base class). In this case, `save_corpus` is automatically called internally by `serialize`, which does `save_corpus` plus saves the index at the same time, so you want to store the corpus with:: >>> MmCorpus.serialize(\'file.mm\', corpus) # stores index as well, allowing random access to individual documents Calling `serialize()` is preferred to calling `save_corpus()`.'
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
    raise NotImplementedError('cannot instantiate abstract base class')

    # example code (never reached), illustrating what a concrete implementation looks like:
    logger.info('converting corpus to ??? format: %s' % fname)
    with utils.smart_open(fname, 'wb') as fout:
        for doc in corpus:
            fmt = str(doc)  # format the document appropriately...
            fout.write(utils.to_utf8('%s\n' % fmt))  # ...and serialize it to disk
'Transform vector from one vector space into another **or** Transform a whole corpus into another.'
def __getitem__(self, vec):
    raise NotImplementedError('cannot instantiate abstract base class')
'Apply the transformation to a whole corpus (as opposed to a single document) and return the result as another corpus.'
def _apply(self, corpus, chunksize=None, **kwargs):
    return TransformedCorpus(self, corpus, chunksize, **kwargs)
'Get similarities of document `query` to all documents in the corpus. **or** If `query` is a corpus (iterable of documents), return a matrix of similarities of all query documents vs. all corpus documents. Using this type of batch query is more efficient than computing the similarities one document after another.'
def __getitem__(self, query):
    is_corpus, query = utils.is_corpus(query)
    if self.normalize:
        # normalization is only applied to plain gensim vectors/corpora; raw numpy or
        # scipy.sparse matrix input is assumed to come already normalized
        if matutils.ismatrix(query):
            import warnings  # noqa:F401
        elif is_corpus:
            query = [matutils.unitvec(v) for v in query]
        else:
            query = matutils.unitvec(query)
    result = self.get_similarities(query)

    if self.num_best is None:
        return result

    # if maintain_sparsity is True, result is scipy sparse: sort, clip to the top-n and return as sparse
    if getattr(self, 'maintain_sparsity', False):
        return matutils.scipy2scipy_clipped(result, self.num_best)

    # if the query was a corpus, compute the top-n most similar for each query document in turn
    if matutils.ismatrix(result):
        return [matutils.full2sparse_clipped(v, self.num_best) for v in result]
    else:
        return matutils.full2sparse_clipped(result, self.num_best)
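
For intuition only (plain numpy standing in for matutils.full2sparse_clipped; the helper name topn_pairs is hypothetical): clipping a dense similarity vector to its num_best largest entries and reporting them as (document index, similarity) pairs could look like this.

import numpy

def topn_pairs(sims, num_best):
    # return the num_best largest entries of a dense vector as (index, value) pairs, best first
    best = numpy.argsort(sims)[::-1][:num_best]
    return [(int(i), float(sims[i])) for i in best]

sims = numpy.array([0.1, 0.8, 0.0, 0.5])
print(topn_pairs(sims, num_best=2))  # [(1, 0.8), (3, 0.5)]
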
'For each index document, compute cosine similarity against all other documents in the index and yield the result.'
def __iter__(self):
    norm = self.normalize
    self.normalize = False

    try:
        chunking = self.chunksize > 1
    except AttributeError:
        chunking = False

    if chunking:
        for chunk_start in xrange(0, self.index.shape[0], self.chunksize):
            chunk_end = min(self.index.shape[0], chunk_start + self.chunksize)
            chunk = self.index[chunk_start:chunk_end]
            for sim in self[chunk]:
                yield sim
    else:
        for doc in self.index:
            yield self[doc]

    self.normalize = norm
'First two articles in this sample are 1) anarchism 2) autism'
def test_first_element(self):
    wc = WikiCorpus(datapath(FILENAME), processes=1)
    texts = wc.get_texts()
    self.assertTrue(u'anarchism' in next(texts))
    self.assertTrue(u'autism' in next(texts))
'First unicode article in this sample is 1) папа'
def test_unicode_element(self):
    wc = WikiCorpus(datapath(FILENAME_U), processes=1)
    texts = wc.get_texts()
    self.assertTrue(u'\u043f\u0430\u043f\u0430' in next(texts))
'Parse documents from the .cor file provided in the constructor. Lowercase each document and ignore some stopwords. .cor format: one document per line, words separated by whitespace.'
def get_texts(self):
    for doc in self.getstream():
        yield [word for word in utils.to_unicode(doc).lower().split()
               if word not in CorpusMiislita.stoplist]
'Define this so we can use `len(corpus)`'
def __len__(self):
    if 'length' not in self.__dict__:
        logger.info('caching corpus size (calculating number of documents)')
        self.length = sum(1 for _ in self.get_texts())
    return self.length
'Make sure TextCorpus can be serialized to disk.'
def test_textcorpus(self):
    miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
    ftmp = get_tmpfile('test_textcorpus.mm')
    corpora.MmCorpus.save_corpus(ftmp, miislita)
    self.assertTrue(os.path.exists(ftmp))
    miislita2 = corpora.MmCorpus(ftmp)
    self.assertEqual(list(miislita), list(miislita2))
'Make sure we can save and load (un/pickle) TextCorpus objects (as long as the underlying input isn\'t a file-like object; we cannot pickle those).'
def test_save_load_ability(self):
    corpusname = datapath('miIslita.cor')
    miislita = CorpusMiislita(corpusname)
    tmpf = get_tmpfile('tc_test.cpickle')
    miislita.save(tmpf)
    miislita2 = CorpusMiislita.load(tmpf)
    self.assertEqual(len(miislita), len(miislita2))
    self.assertEqual(miislita.dictionary.token2id, miislita2.dictionary.token2id)
'Check that similarities are non-increasing when `num_best` is not `None`.'
def testNonIncreasing(self):
    if not PYEMD_EXT:
        return

    index = self.cls(texts, self.w2v_model, num_best=3)
    query = texts[0]
    sims = index[query]
    sims2 = numpy.asarray(sims)[:, 1]
    cond = sum(numpy.diff(sims2) < 0) == len(sims2) - 1
    self.assertTrue(cond)
'Sparsity is correctly maintained when maintain_sparsity=True'
def testMaintainSparsity(self):
    num_features = len(dictionary)

    index = self.cls(corpus, num_features=num_features)
    dense_sims = index[corpus]
    index = self.cls(corpus, num_features=num_features, maintain_sparsity=True)
    sparse_sims = index[corpus]

    self.assertFalse(scipy.sparse.issparse(dense_sims))
    self.assertTrue(scipy.sparse.issparse(sparse_sims))
    numpy.testing.assert_array_equal(dense_sims, sparse_sims.todense())
'Tests that sparsity is correctly maintained when maintain_sparsity=True and num_best is not None'
def testMaintainSparsityWithNumBest(self):
    num_features = len(dictionary)

    index = self.cls(corpus, num_features=num_features, maintain_sparsity=False, num_best=3)
    dense_topn_sims = index[corpus]
    index = self.cls(corpus, num_features=num_features, maintain_sparsity=True, num_best=3)
    scipy_topn_sims = index[corpus]

    self.assertFalse(scipy.sparse.issparse(dense_topn_sims))
    self.assertTrue(scipy.sparse.issparse(scipy_topn_sims))
    self.assertEqual(dense_topn_sims, [matutils.scipy2sparse(v) for v in scipy_topn_sims])
'test re-opening partially full shards'
def testReopen(self):
    index = similarities.Similarity(None, corpus[:5], num_features=len(dictionary), shardsize=9)
    _ = index[corpus[0]]  # querying forces the partial shard to close
    index.add_documents(corpus[5:])
    query = corpus[0]
    sims = index[query]
    expected = [(0, 0.99999994), (2, 0.28867513), (3, 0.23570226), (1, 0.23570226)]
    expected = matutils.sparse2full(expected, len(index))
    self.assertTrue(numpy.allclose(expected, sims))
    index.destroy()
'Test model successfully loaded from Wordrank format file'
def testLoadWordrankFormat(self):
    model = wordrank.Wordrank.load_wordrank_model(self.wr_file)
    vocab_size, dim = 76, 50
    self.assertEqual(model.syn0.shape, (vocab_size, dim))
    self.assertEqual(len(model.vocab), vocab_size)
    os.remove(self.wr_file + '.w2vformat')
'Test ensemble of two embeddings'
def testEnsemble(self):
    if not self.wr_path:
        return
    new_emb = self.test_model.ensemble_embedding(self.wr_file, self.wr_file)
    self.assertEqual(new_emb.shape, (76, 50))
    os.remove(self.wr_file + '.w2vformat')
'Test storing/loading the entire model'
def testPersistence(self):
    if not self.wr_path:
        return
    self.test_model.save(testfile())
    loaded = wordrank.Wordrank.load(testfile())
    self.models_equal(self.test_model, loaded)
'Test n_similarity for vocab words'
def testSimilarity(self):
    if not self.wr_path:
        return
    self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
    self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('the', 'and'))
'Test lsi[vector] transformation.'
def testTransform(self):
    model = self.model

    # the model's singular values must match those from a direct SVD of the corpus
    u, s, vt = scipy.linalg.svd(matutils.corpus2dense(self.corpus, self.corpus.num_terms), full_matrices=False)
    self.assertTrue(np.allclose(s[:2], model.projection.s))

    # transform one document and compare against the expected projection, up to sign
    doc = list(self.corpus)[0]
    transformed = model[doc]
    vec = matutils.sparse2full(transformed, 2)
    expected = np.array([-0.6594664, 0.142115444])
    self.assertTrue(np.allclose(abs(vec), abs(expected)))
'Test lsi[corpus] transformation.'
def testCorpusTransform(self):
    model = self.model
    got = np.vstack(matutils.sparse2full(doc, 2) for doc in model[self.corpus])
    expected = np.array([
        [0.65946639, 0.14211544],
        [2.02454305, -0.42088759],
        [1.54655361, 0.32358921],
        [1.81114125, 0.5890525],
        [0.9336738, -0.27138939],
        [0.01274618, -0.49016181],
        [0.04888203, -1.11294699],
        [0.08063836, -1.56345594],
        [0.27381003, -1.34694159],
    ])
    # results must match up to sign, since the sign of singular vectors is arbitrary
    self.assertTrue(np.allclose(abs(got), abs(expected)))
'Test loading a model saved in the VarEmbed format.'
def testLoadVarembedFormat(self):
    model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
    self.model_sanity(model)
'Test n_similarity for vocab words'
def testSimilarity(self):
    model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
    self.assertTrue(model.n_similarity(['result'], ['targets']) == model.similarity('result', 'targets'))
'Check vocabulary and vector size'
def model_sanity(self, model):
    self.assertEqual(model.syn0.shape, (model.vocab_size, model.vector_size))
    self.assertTrue(model.syn0.shape[0] == len(model.vocab))
'Test adding morphemes to embeddings. Runs only on Python 2.7 and above; adding morphemes is not supported in earlier versions.'
@unittest.skipIf(sys.version_info < (2, 7), 'Supported only on Python 2.7 and above')
def testAddMorphemesToEmbeddings(self):
    model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
    model_with_morphemes = varembed.VarEmbed.load_varembed_format(
        vectors=varembed_model_vector_file, morfessor_model=varembed_model_morfessor_file)
    self.model_sanity(model_with_morphemes)
    self.assertFalse(np.allclose(model.syn0, model_with_morphemes.syn0))
'Test lookup of vector for a particular word and list'
def testLookup(self):
    model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
    self.assertTrue(np.allclose(model['language'], model[['language']]))
'Test log_conditional_probability()'
def testLogConditionalProbability(self):
    obtained = direct_confirmation_measure.log_conditional_probability(self.segmentation, self.accumulator)[0]
    expected = -0.693147181
    self.assertAlmostEqual(obtained, expected)
'Test log_ratio_measure()'
def testLogRatioMeasure(self):
    obtained = direct_confirmation_measure.log_ratio_measure(self.segmentation, self.accumulator)[0]
    expected = -0.182321557
    self.assertAlmostEqual(obtained, expected)
'Test normalized_log_ratio_measure()'
def testNormalizedLogRatioMeasure(self):
    obtained = direct_confirmation_measure.log_ratio_measure(self.segmentation, self.accumulator, normalize=True)[0]
    expected = -0.113282753
    self.assertAlmostEqual(obtained, expected)
'Check show topics method'
def testTopicValues(self):
    results = self.model.show_topics()[0]
    expected_prob, expected_word = '0.264', 'trees '
    prob, word = results[1].split('+')[0].split('*')
    self.assertEqual(results[0], 0)
    self.assertEqual(prob, expected_prob)
    self.assertEqual(word, expected_word)
    return
'Create ldamodel object, and check if the corresponding alphas are equal.'
def testLDAmodel(self):
    ldam = self.model.suggested_lda_model()
    self.assertEqual(ldam.alpha[0], self.model.lda_alpha[0])
'Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary'
def testOnlineLearning(self):
    model_hs = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=1, negative=0)
    model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
    self.assertTrue(len(model_hs.wv.vocab), 12)
    self.assertTrue(model_hs.wv.vocab['graph'].count, 3)
    model_hs.build_vocab(new_sentences, update=True)
    model_neg.build_vocab(new_sentences, update=True)
    self.assertTrue(model_hs.wv.vocab['graph'].count, 4)
    self.assertTrue(model_hs.wv.vocab['artificial'].count, 4)
    self.assertEqual(len(model_hs.wv.vocab), 14)
    self.assertEqual(len(model_neg.wv.vocab), 14)
'Test that the algorithm is able to add new words to the vocabulary and to a trained model when using a sorted vocabulary'
def testOnlineLearningAfterSave(self):
    model_neg = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=0, negative=5)
    model_neg.save(testfile())
    model_neg = word2vec.Word2Vec.load(testfile())
    self.assertTrue(len(model_neg.wv.vocab), 12)
    model_neg.build_vocab(new_sentences, update=True)
    model_neg.train(new_sentences, total_examples=model_neg.corpus_count, epochs=model_neg.iter)
    self.assertEqual(len(model_neg.wv.vocab), 14)
'Test skipgram w/ hierarchical softmax'
def test_sg_hs_online(self):
    model = word2vec.Word2Vec(sg=1, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
    self.onlineSanity(model)
'Test skipgram w/ negative sampling'
def test_sg_neg_online(self):
    model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=3, iter=10, seed=42, workers=2)
    self.onlineSanity(model)
'Test CBOW w/ hierarchical softmax'
def test_cbow_hs_online(self):
    model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=1, negative=0, min_count=3, iter=10, seed=42, workers=2)
    self.onlineSanity(model)
'Test CBOW w/ negative sampling'
def test_cbow_neg_online(self):
    model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15, min_count=5, iter=10, seed=42, workers=2, sample=0)
    self.onlineSanity(model)
'Test storing/loading the entire model.'
def testPersistence(self):
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.save(testfile())
    self.models_equal(model, word2vec.Word2Vec.load(testfile()))

    wv = model.wv
    wv.save(testfile())
    loaded_wv = keyedvectors.KeyedVectors.load(testfile())
    self.assertTrue(np.allclose(wv.syn0, loaded_wv.syn0))
    self.assertEqual(len(wv.vocab), len(loaded_wv.vocab))
'Test storing/loading the entire model with a vocab trimming rule passed in the constructor.'
def testPersistenceWithConstructorRule(self):
    model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=_rule)
    model.save(testfile())
    self.models_equal(model, word2vec.Word2Vec.load(testfile()))
'Test that returning RULE_DEFAULT from trim_rule triggers min_count.'
def testRuleWithMinCount(self):
    model = word2vec.Word2Vec(sentences + [['occurs_only_once']], min_count=2, trim_rule=_rule)
    self.assertTrue('human' not in model.wv.vocab)
    self.assertTrue('occurs_only_once' not in model.wv.vocab)
    self.assertTrue('interface' in model.wv.vocab)
'Test applying vocab trim_rule to build_vocab instead of constructor.'
def testRule(self):
    model = word2vec.Word2Vec(min_count=1)
    model.build_vocab(sentences, trim_rule=_rule)
    self.assertTrue('human' not in model.wv.vocab)
'Test that lambda trim_rule works.'
def testLambdaRule(self):
    rule = lambda word, count, min_count: utils.RULE_DISCARD if word == 'human' else utils.RULE_DEFAULT
    model = word2vec.Word2Vec(sentences, min_count=1, trim_rule=rule)
    self.assertTrue('human' not in model.wv.vocab)
'Test syn0norm isn\'t saved in model file'
def testSyn0NormNotSaved(self):
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    model.save(testfile())
    loaded_model = word2vec.Word2Vec.load(testfile())
    self.assertTrue(loaded_model.wv.syn0norm is None)

    wv = model.wv
    wv.save(testfile())
    loaded_kv = keyedvectors.KeyedVectors.load(testfile())
    self.assertTrue(loaded_kv.syn0norm is None)