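# The test cases below are excerpted from gensim's test suite and assume shared
# fixtures defined elsewhere in that suite. A minimal sketch of the most common
# ones, reconstructed from how they are used below (assumptions, not the
# canonical definitions):
import os
import sys
import tempfile

import numpy as np

from gensim import utils


def datapath(fname):
    # resolve a file inside the test_data directory shipped with the tests
    return os.path.join(os.path.dirname(__file__), 'test_data', fname)


def testfile():
    # shared temporary file reused by the persistence tests (name is assumed)
    return os.path.join(tempfile.gettempdir(), 'gensim_models.tst')


sentences = [  # the tiny "common texts" corpus used throughout
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['eps', 'user', 'interface', 'system'],
    ['system', 'human', 'system', 'eps'],
    ['user', 'response', 'time'],
    ['trees'],
    ['graph', 'trees'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey'],
]


class LeeCorpus(object):
    # streams the Lee background corpus one pre-tokenized sentence at a time
    def __iter__(self):
        with open(datapath('lee_background.cor')) as f:
            for line in f:
                yield utils.simple_preprocess(line)


list_corpus = list(LeeCorpus())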
def testLoadPreKeyedVectorModel(self):
    """Test loading pre-KeyedVectors word2vec model."""
    if sys.version_info[:2] == (3, 4):
        model_file_suffix = '_py3_4'
    elif sys.version_info < (3,):
        model_file_suffix = '_py2'
    else:
        model_file_suffix = '_py3'
    model_file = 'word2vec_pre_kv%s' % model_file_suffix
    model = word2vec.Word2Vec.load(datapath(model_file))
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
    self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))
    model_file = 'word2vec_pre_kv_sep%s' % model_file_suffix
    model = word2vec.Word2Vec.load(datapath(model_file))
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), model.vector_size))
    self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), model.vector_size))

def testLoadPreKeyedVectorModelCFormat(self):
    """Test loading pre-KeyedVectors word2vec model saved in word2vec format."""
    model = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
    self.assertTrue(model.syn0.shape[0] == len(model.vocab))
def testPersistenceWord2VecFormat(self):
    """Test storing/loading the entire model in word2vec format."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    model.wv.save_word2vec_format(testfile(), binary=True)
    binary_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
    binary_model_kv.init_sims(replace=False)
    self.assertTrue(np.allclose(model['human'], binary_model_kv['human']))
    norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
    norm_only_model.init_sims(replace=True)
    self.assertFalse(np.allclose(model['human'], norm_only_model['human']))
    self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human']))
    limited_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, limit=3)
    self.assertEqual(len(limited_model_kv.syn0), 3)
    half_precision_model_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True, datatype=np.float16)
    self.assertEqual(binary_model_kv.syn0.nbytes, half_precision_model_kv.syn0.nbytes * 2)
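# The replace=False / replace=True distinction above hinges on what init_sims
# does to the raw vectors. A minimal sketch of the semantics (not gensim's
# actual implementation; array names follow the syn0/syn0norm convention used
# in these tests):
def init_sims_sketch(syn0, replace=False):
    norms = np.sqrt((syn0 ** 2).sum(axis=1))[:, np.newaxis]
    syn0norm = (syn0 / norms).astype(syn0.dtype)  # unit-length rows
    if replace:
        syn0[:] = syn0norm  # raw vectors are overwritten and cannot be recovered
    return syn0norm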
def testPersistenceWord2VecFormatNonBinary(self):
    """Test storing/loading the entire model in word2vec non-binary format."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    model.wv.save_word2vec_format(testfile(), binary=False)
    text_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
    text_model.init_sims(False)
    self.assertTrue(np.allclose(model['human'], text_model['human'], atol=1e-06))
    norm_only_model = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=False)
    norm_only_model.init_sims(True)
    self.assertFalse(np.allclose(model['human'], norm_only_model['human'], atol=1e-06))
    self.assertTrue(np.allclose(model.wv.syn0norm[model.wv.vocab['human'].index], norm_only_model['human'], atol=1e-04))
def testPersistenceWord2VecFormatWithVocab(self):
    """Test storing/loading the entire model and vocabulary in word2vec format."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
    model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
    binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
    self.assertEqual(model.wv.vocab['human'].count, binary_model_with_vocab_kv.vocab['human'].count)

def testPersistenceKeyedVectorsFormatWithVocab(self):
    """Test storing/loading the entire model and vocabulary in word2vec format."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
    model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
    kv_binary_model_with_vocab = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
    self.assertEqual(model.wv.vocab['human'].count, kv_binary_model_with_vocab.vocab['human'].count)
def testPersistenceWord2VecFormatCombinationWithStandardPersistence(self):
    """Test storing/loading the entire model and vocabulary in word2vec format,
    chained with saving and loading via `save` and `load` methods.
    This was possible prior to the 1.0.0 release; it now raises an exception.
    """
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.init_sims()
    testvocab = os.path.join(tempfile.gettempdir(), 'gensim_word2vec.vocab')
    model.wv.save_word2vec_format(testfile(), testvocab, binary=True)
    binary_model_with_vocab_kv = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), testvocab, binary=True)
    binary_model_with_vocab_kv.save(testfile())
    self.assertRaises(AttributeError, word2vec.Word2Vec.load, testfile())

def testLargeMmap(self):
    """Test storing/loading the entire model."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    model.save(testfile(), sep_limit=0)
    self.models_equal(model, word2vec.Word2Vec.load(testfile()))
    self.models_equal(model, word2vec.Word2Vec.load(testfile(), mmap='r'))
def testVocab(self):
    """Test word2vec vocabulary building."""
    corpus = LeeCorpus()
    total_words = sum(len(sentence) for sentence in corpus)
    model = word2vec.Word2Vec(min_count=1, hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv.vocab) == 6981)
    self.assertEqual(sum(v.count for v in model.wv.vocab.values()), total_words)
    self.assertTrue(np.allclose(model.wv.vocab['the'].code, [1, 1, 0, 0]))
    model = word2vec.Word2Vec(hs=1, negative=0)
    model.build_vocab(corpus)
    self.assertTrue(len(model.wv.vocab) == 1750)
    self.assertTrue(np.allclose(model.wv.vocab['the'].code, [1, 1, 1, 0]))
    self.assertRaises(RuntimeError, word2vec.Word2Vec, [])
    self.assertRaises(RuntimeError, word2vec.Word2Vec, corpus, min_count=total_words + 1)
def testTraining(self):
    """Test word2vec training."""
    model = word2vec.Word2Vec(size=2, min_count=1, hs=1, negative=0)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
    self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    sims = model.most_similar('graph', topn=10)
    graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
    sims2 = model.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']
    self.assertEqual(sims, sims2)
    model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
    self.models_equal(model, model2)

def testScoring(self):
    """Test word2vec scoring."""
    model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
    scores = model.score(sentences, len(sentences))
    self.assertEqual(len(scores), len(sentences))

def testLocking(self):
    """Test word2vec training doesn't change locked vectors."""
    corpus = LeeCorpus()
    for sg in range(2):
        model = word2vec.Word2Vec(size=4, hs=1, negative=5, min_count=1, sg=sg, window=5)
        model.build_vocab(corpus)
        locked0 = np.copy(model.wv.syn0[0])
        unlocked1 = np.copy(model.wv.syn0[1])
        model.syn0_lockf[0] = 0.0
        model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
        self.assertFalse((unlocked1 == model.wv.syn0[1]).all())
        self.assertTrue((locked0 == model.wv.syn0[0]).all())

def testAccuracy(self):
    """Test Word2Vec accuracy and KeyedVectors accuracy give the same result."""
    model = word2vec.Word2Vec(LeeCorpus())
    w2v_accuracy = model.accuracy(datapath('questions-words.txt'))
    kv_accuracy = model.wv.accuracy(datapath('questions-words.txt'))
    self.assertEqual(w2v_accuracy, kv_accuracy)

def testEvaluateWordPairs(self):
    """Test Spearman and Pearson correlation coefficients give sane results on similarity datasets."""
    corpus = word2vec.LineSentence(datapath('head500.noblanks.cor.bz2'))
    model = word2vec.Word2Vec(corpus, min_count=3, iter=10)
    correlation = model.evaluate_word_pairs(datapath('wordsim353.tsv'))
    pearson = correlation[0][0]
    spearman = correlation[1][0]
    oov = correlation[2]
    self.assertTrue(0.1 < pearson < 1.0)
    self.assertTrue(0.1 < spearman < 1.0)
    self.assertTrue(0.0 <= oov < 90.0)
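# evaluate_word_pairs returns a 3-tuple, unpacked above: correlation[0] and
# correlation[1] are scipy-style (statistic, p-value) pairs for Pearson and
# Spearman respectively, and correlation[2] is the out-of-vocabulary ratio as a
# percentage -- which is why the assertions read the [0] element of the first
# two and bound the third by 90.0.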
def model_sanity(self, model, train=True):
    """Even tiny models trained on LeeCorpus should pass these sanity checks."""
    if train:
        model.build_vocab(list_corpus)
        orig0 = np.copy(model.wv.syn0[0])
        model.train(list_corpus, total_examples=model.corpus_count, epochs=model.iter)
        self.assertFalse((orig0 == model.wv.syn0[0]).all())
    sims = model.most_similar('war', topn=len(model.wv.index2word))
    t_rank = [word for word, score in sims].index('terrorism')
    self.assertLess(t_rank, 50)
    war_vec = model['war']
    sims2 = model.most_similar([war_vec], topn=51)
    self.assertTrue('war' in [word for word, score in sims2])
    self.assertTrue('terrorism' in [word for word, score in sims2])
def test_sg_hs(self):
    """Test skipgram w/ hierarchical softmax."""
    model = word2vec.Word2Vec(sg=1, window=4, hs=1, negative=0, min_count=5, iter=10, workers=2)
    self.model_sanity(model)

def test_sg_neg(self):
    """Test skipgram w/ negative sampling."""
    model = word2vec.Word2Vec(sg=1, window=4, hs=0, negative=15, min_count=5, iter=10, workers=2)
    self.model_sanity(model)

def test_cbow_hs(self):
    """Test CBOW w/ hierarchical softmax."""
    model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=8, hs=1, negative=0, min_count=5, iter=10, workers=2, batch_words=1000)
    self.model_sanity(model)

def test_cbow_neg(self):
    """Test CBOW w/ negative sampling."""
    model = word2vec.Word2Vec(sg=0, cbow_mean=1, alpha=0.05, window=5, hs=0, negative=15, min_count=5, iter=10, workers=2, sample=0)
    self.model_sanity(model)

def testTrainingCbow(self):
    """Test CBOW word2vec training."""
    model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=1, negative=0)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
    self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 2))
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    sims = model.most_similar('graph', topn=10)
    graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
    sims2 = model.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']
    self.assertEqual(sims, sims2)
    model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=1, negative=0)
    self.models_equal(model, model2)

def testTrainingSgNegative(self):
    """Test skip-gram (negative sampling) word2vec training."""
    model = word2vec.Word2Vec(size=2, min_count=1, sg=1, hs=0, negative=2)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
    self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    sims = model.most_similar('graph', topn=10)
    graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
    sims2 = model.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']
    self.assertEqual(sims, sims2)
    model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=1, hs=0, negative=2)
    self.models_equal(model, model2)

def testTrainingCbowNegative(self):
    """Test CBOW (negative sampling) word2vec training."""
    model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
    model.build_vocab(sentences)
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 2))
    self.assertTrue(model.syn1neg.shape == (len(model.wv.vocab), 2))
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    sims = model.most_similar('graph', topn=10)
    graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
    sims2 = model.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']
    self.assertEqual(sims, sims2)
    model2 = word2vec.Word2Vec(sentences, size=2, min_count=1, sg=0, hs=0, negative=2)
    self.models_equal(model, model2)

def testSimilarities(self):
    """Test similarity and n_similarity methods."""
    model = word2vec.Word2Vec(size=2, min_count=1, sg=0, hs=0, negative=2)
    model.build_vocab(sentences)
    model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
    self.assertTrue(model.n_similarity(['graph', 'trees'], ['trees', 'graph']))
    self.assertTrue(model.n_similarity(['graph'], ['trees']) == model.similarity('graph', 'trees'))
    self.assertRaises(ZeroDivisionError, model.n_similarity, ['graph', 'trees'], [])
    self.assertRaises(ZeroDivisionError, model.n_similarity, [], ['graph', 'trees'])
    self.assertRaises(ZeroDivisionError, model.n_similarity, [], [])
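# Conceptually, n_similarity reduces each word list to the mean of its vectors
# and takes the cosine of the two means, raising ZeroDivisionError for empty
# lists -- a sketch of the semantics the assertions above rely on (not gensim's
# exact code):
def n_similarity_sketch(wv, ws1, ws2):
    if not (len(ws1) and len(ws2)):
        raise ZeroDivisionError('at least one of the passed lists is empty')
    v1 = np.mean([wv[w] for w in ws1], axis=0)
    v2 = np.mean([wv[w] for w in ws2], axis=0)
    return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))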
def testSimilarBy(self):
    """Test word2vec similar_by_word and similar_by_vector."""
    model = word2vec.Word2Vec(sentences, size=2, min_count=1, hs=1, negative=0)
    wordsims = model.similar_by_word('graph', topn=10)
    wordsims2 = model.most_similar(positive='graph', topn=10)
    vectorsims = model.similar_by_vector(model['graph'], topn=10)
    vectorsims2 = model.most_similar([model['graph']], topn=10)
    self.assertEqual(wordsims, wordsims2)
    self.assertEqual(vectorsims, vectorsims2)

def testParallel(self):
    """Test word2vec parallel training."""
    if word2vec.FAST_VERSION < 0:
        return
    corpus = utils.RepeatCorpus(LeeCorpus(), 10000)
    for workers in [2, 4]:
        model = word2vec.Word2Vec(corpus, workers=workers)
        sims = model.most_similar('israeli')

def testRNG(self):
    """Test word2vec results identical with identical RNG seed."""
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    model2 = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    self.models_equal(model, model2)

def testDeleteTemporaryTrainingData(self):
    """Test word2vec model after delete_temporary_training_data."""
    for i in [0, 1]:
        for j in [0, 1]:
            model = word2vec.Word2Vec(sentences, size=10, min_count=0, seed=42, hs=i, negative=j)
            if i:
                self.assertTrue(hasattr(model, 'syn1'))
            if j:
                self.assertTrue(hasattr(model, 'syn1neg'))
            self.assertTrue(hasattr(model, 'syn0_lockf'))
            model.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
            self.assertEqual(len(model['human']), 10)
            self.assertEqual(len(model.wv.vocab), 12)
            self.assertEqual(model.wv.vocab['graph'].count, 3)
            self.assertFalse(hasattr(model, 'syn1'))
            self.assertFalse(hasattr(model, 'syn1neg'))
            self.assertFalse(hasattr(model, 'syn0_lockf'))
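# delete_temporary_training_data drops the output weights (syn1/syn1neg) and the
# per-vector lock factors, so the model can still be queried but not trained
# further; with replace_word_vectors_with_normalized=True the raw syn0 is also
# replaced by its unit-normalized form, as the assertions above exercise.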
def testPredictOutputWord(self):
    """Test word2vec predict_output_word method handling for negative sampling scheme."""
    model_with_neg = word2vec.Word2Vec(sentences, min_count=1)
    predictions_with_neg = model_with_neg.predict_output_word(['system', 'human'], topn=5)
    self.assertTrue(len(predictions_with_neg) == 5)
    predictions_out_of_vocab = model_with_neg.predict_output_word(['some', 'random', 'words'], topn=5)
    self.assertEqual(predictions_out_of_vocab, None)
    model_with_neg.init_sims()
    model_with_neg.wv.save_word2vec_format(testfile(), binary=True)
    kv_model_with_neg = keyedvectors.KeyedVectors.load_word2vec_format(testfile(), binary=True)
    binary_model_with_neg = word2vec.Word2Vec()
    binary_model_with_neg.wv = kv_model_with_neg
    self.assertRaises(RuntimeError, binary_model_with_neg.predict_output_word, ['system', 'human'])
    model_without_neg = word2vec.Word2Vec(sentences, min_count=1, negative=0)
    self.assertRaises(RuntimeError, model_without_neg.predict_output_word, ['system', 'human'])

@log_capture()
def testBuildVocabWarning(self, l):
    """Test if a warning is raised on non-ideal input to a word2vec model."""
    sentences = ['human', 'machine']
    model = word2vec.Word2Vec()
    model.build_vocab(sentences)
    warning = "Each 'sentences' item should be a list of words (usually unicode strings)."
    self.assertTrue(warning in str(l))

@log_capture()
def testTrainWarning(self, l):
    """Test if a warning is raised if alpha rises during subsequent calls to train()."""
    sentences = [['human'], ['graph', 'trees']]
    model = word2vec.Word2Vec(min_count=1)
    model.build_vocab(sentences)
    for epoch in range(10):
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        model.alpha -= 0.002
        model.min_alpha = model.alpha
        if epoch == 5:
            model.alpha += 0.05
    warning = "Effective 'alpha' higher than previous training cycles"
    self.assertTrue(warning in str(l))

def test_sentences_should_not_be_a_generator(self):
    """Is sentences a generator object?"""
    gen = (s for s in sentences)
    self.assertRaises(TypeError, word2vec.Word2Vec, (gen,))
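# Word2Vec needs to iterate over the corpus multiple times (once to build the
# vocabulary, then once per training epoch), so a generator -- which is
# exhausted after a single pass -- is rejected with a TypeError; a restartable
# iterable is required instead.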
def testLoadOnClassError(self):
    """Test if exception is raised when loading word2vec model on instance."""
    self.assertRaises(AttributeError, load_on_instance)

def test_reset_from(self):
    """Test if reset_from() uses pre-built structures from other model."""
    model = word2vec.Word2Vec(sentences, min_count=1)
    other_model = word2vec.Word2Vec(new_sentences, min_count=1)
    other_vocab = other_model.wv.vocab
    model.reset_from(other_model)
    self.assertEqual(model.wv.vocab, other_vocab)

def testNonzero(self):
    """Test basic functionality with a test sentence."""
    if not PYEMD_EXT:
        return
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    sentence1 = ['human', 'interface', 'computer']
    sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance = model.wmdistance(sentence1, sentence2)
    self.assertFalse(distance == 0.0)

def testSymmetry(self):
    """Check that distance is symmetric."""
    if not PYEMD_EXT:
        return
    model = word2vec.Word2Vec(sentences, min_count=2, seed=42, workers=1)
    sentence1 = ['human', 'interface', 'computer']
    sentence2 = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance1 = model.wmdistance(sentence1, sentence2)
    distance2 = model.wmdistance(sentence2, sentence1)
    self.assertTrue(np.allclose(distance1, distance2))

def testIdenticalSentences(self):
    """Check that the distance from a sentence to itself is zero."""
    if not PYEMD_EXT:
        return
    model = word2vec.Word2Vec(sentences, min_count=1)
    sentence = ['survey', 'user', 'computer', 'system', 'response', 'time']
    distance = model.wmdistance(sentence, sentence)
    self.assertEqual(0.0, distance)

def testLineSentenceWorksWithFilename(self):
    """Does LineSentence work with a filename argument?"""
    with utils.smart_open(datapath('lee_background.cor')) as orig:
        sentences = word2vec.LineSentence(datapath('lee_background.cor'))
        for words in sentences:
            self.assertEqual(words, utils.to_unicode(orig.readline()).split())

def testLineSentenceWorksWithCompressedFile(self):
    """Does LineSentence work with a compressed file object argument?"""
    with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
        sentences = word2vec.LineSentence(bz2.BZ2File(datapath('head500.noblanks.cor.bz2')))
        for words in sentences:
            self.assertEqual(words, utils.to_unicode(orig.readline()).split())

def testLineSentenceWorksWithNormalFile(self):
    """Does LineSentence work with a file object argument, rather than filename?"""
    with utils.smart_open(datapath('head500.noblanks.cor')) as orig:
        with utils.smart_open(datapath('head500.noblanks.cor')) as fin:
            sentences = word2vec.LineSentence(fin)
            for words in sentences:
                self.assertEqual(words, utils.to_unicode(orig.readline()).split())

def testPathLineSentences(self):
    """Does PathLineSentences work with a path argument?"""
    with utils.smart_open(os.path.join(datapath('PathLineSentences'), '1.txt')) as orig1:
        with utils.smart_open(os.path.join(datapath('PathLineSentences'), '2.txt.bz2')) as orig2:
            sentences = word2vec.PathLineSentences(datapath('PathLineSentences'))
            orig = orig1.readlines() + orig2.readlines()
            orig_counter = 0
            for words in sentences:
                self.assertEqual(words, utils.to_unicode(orig[orig_counter]).split())
                orig_counter += 1

def testPathLineSentencesOneFile(self):
    """Does PathLineSentences work with a single file argument?"""
    test_file = os.path.join(datapath('PathLineSentences'), '1.txt')
    with utils.smart_open(test_file) as orig:
        sentences = word2vec.PathLineSentences(test_file)
        for words in sentences:
            self.assertEqual(words, utils.to_unicode(orig.readline()).split())

def setUp(self):
    """Set up Lee test corpora."""
    global bg_corpus, corpus, human_sim_vector, bg_corpus2, corpus2
    pre_path = os.path.join(os.path.dirname(__file__), 'test_data')
    bg_corpus_file = 'lee_background.cor'
    corpus_file = 'lee.cor'
    sim_file = 'similarities0-1.txt'
    latin1 = lambda line: utils.to_unicode(line, encoding='latin1')
    with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
        bg_corpus = preprocess_documents(latin1(line) for line in f)
    with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
        corpus = preprocess_documents(latin1(line) for line in f)
    with utils.smart_open(os.path.join(pre_path, bg_corpus_file)) as f:
        bg_corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
    with utils.smart_open(os.path.join(pre_path, corpus_file)) as f:
        corpus2 = [preprocess_string(latin1(s), filters=DEFAULT_FILTERS[:-1]) for s in f]
    sim_matrix = np.loadtxt(os.path.join(pre_path, sim_file))
    sim_m_size = np.shape(sim_matrix)[0]
    human_sim_vector = sim_matrix[matutils.triu_indices(sim_m_size, 1)]

def test_corpus(self):
    """Availability and integrity of corpus."""
    documents_in_bg_corpus = 300
    documents_in_corpus = 50
    len_sim_vector = 1225
    self.assertEqual(len(bg_corpus), documents_in_bg_corpus)
    self.assertEqual(len(corpus), documents_in_corpus)
    self.assertEqual(len(human_sim_vector), len_sim_vector)
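# The expected vector length follows from the 50x50 human similarity matrix:
# triu_indices(50, 1) keeps the strict upper triangle, i.e. one entry per
# unordered document pair, and 50 * 49 / 2 = 1225.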
def test_lee(self):
    """Correlation with human data > 0.6
    (this is the value which was achieved in the original paper).
    """
    global bg_corpus, corpus
    dictionary = corpora.Dictionary(bg_corpus)
    bg_corpus = [dictionary.doc2bow(text) for text in bg_corpus]
    corpus = [dictionary.doc2bow(text) for text in corpus]
    log_ent = models.LogEntropyModel(bg_corpus)
    bg_corpus_ent = log_ent[bg_corpus]
    lsi = models.LsiModel(bg_corpus_ent, id2word=dictionary, num_topics=200)
    corpus_lsi = lsi[log_ent[corpus]]
    res = np.zeros((len(corpus), len(corpus)))
    for i, par1 in enumerate(corpus_lsi):
        for j, par2 in enumerate(corpus_lsi):
            res[i, j] = matutils.cossim(par1, par2)
    flat = res[matutils.triu_indices(len(corpus), 1)]
    cor = np.corrcoef(flat, human_sim_vector)[0, 1]
    logging.info('LSI correlation coefficient is %s', cor)
    self.assertTrue(cor > 0.6)

def testWord2VecTraining(self):
    """Test word2vec training."""
    model = self.model_cos_sim
    self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 100))
    self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 100))
    sims = model.most_similar('graph', topn=10)
    graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
    sims2 = model.most_similar(positive=[graph_vector], topn=11)
    sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']
    self.assertEqual(sims, sims2)
def testEmbeddingLayerCosineSim(self):
    """Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task."""
    keras_w2v_model = self.model_cos_sim
    keras_w2v_model_wv = keras_w2v_model.wv
    embedding_layer = keras_w2v_model_wv.get_embedding_layer()
    input_a = Input(shape=(1,), dtype='int32', name='input_a')
    input_b = Input(shape=(1,), dtype='int32', name='input_b')
    embedding_a = embedding_layer(input_a)
    embedding_b = embedding_layer(input_b)
    similarity = dot([embedding_a, embedding_b], axes=2, normalize=True)
    model = Model(inputs=[input_a, input_b], outputs=similarity)
    model.compile(optimizer='sgd', loss='mse')
    word_a = 'graph'
    word_b = 'trees'
    output = model.predict([
        np.asarray([keras_w2v_model.wv.vocab[word_a].index]),
        np.asarray([keras_w2v_model.wv.vocab[word_b].index]),
    ])
    self.assertTrue(type(output[0][0][0]) == np.float32)
def testEmbeddingLayer20NewsGroup(self):
    """Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a smaller version of the 20NewsGroup classification problem."""
    MAX_SEQUENCE_LENGTH = 1000
    texts = []
    texts_w2v = []
    labels = []
    data = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'comp.graphics', 'sci.space'])
    for index in range(len(data.data)):
        label_id = data.target[index]
        file_data = data.data[index]
        i = file_data.find('\n\n')
        if i > 0:
            file_data = file_data[i:]
        try:
            curr_str = str(file_data)
            sentence_list = curr_str.split('\n')
            for sentence in sentence_list:
                sentence = sentence.strip().lower()
                texts.append(sentence)
                texts_w2v.append(sentence.split(' '))
                labels.append(label_id)
        except Exception:
            pass
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    labels = to_categorical(np.asarray(labels))
    x_train = data
    y_train = labels
    Keras_w2v = self.model_twenty_ng
    Keras_w2v.build_vocab(texts_w2v)
    Keras_w2v.train(texts, total_examples=Keras_w2v.corpus_count, epochs=Keras_w2v.iter)
    Keras_w2v_wv = Keras_w2v.wv
    embedding_layer = Keras_w2v_wv.get_embedding_layer()
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(35)(x)
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(y_train.shape[1], activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    fit_ret_val = model.fit(x_train, y_train, epochs=1)
    self.assertTrue(type(fit_ret_val) == keras.callbacks.History)
def test_tupleInput_l1(self):
    """Test tuple input for l1 transformation."""
    normalized = self.model_l1.normalize(self.doc)
    expected = [(1, 0.25), (5, 0.5), (8, 0.25)]
    self.assertTrue(np.allclose(normalized, expected))

def test_sparseCSRInput_l1(self):
    """Test sparse csr matrix input for l1 transformation."""
    row = np.array([0, 0, 1, 2, 2, 2])
    col = np.array([0, 2, 2, 0, 1, 2])
    data = np.array([1, 2, 3, 4, 5, 6])
    sparse_matrix = csr_matrix((data, (row, col)), shape=(3, 3))
    normalized = self.model_l1.normalize(sparse_matrix)
    self.assertTrue(issparse(normalized))
    expected = np.array([
        [0.04761905, 0.0, 0.0952381],
        [0.0, 0.0, 0.14285714],
        [0.19047619, 0.23809524, 0.28571429],
    ])
    self.assertTrue(np.allclose(normalized.toarray(), expected))

def test_numpyndarrayInput_l1(self):
    """Test np ndarray input for l1 transformation."""
    ndarray_matrix = np.array([[1, 0, 2], [0, 0, 3], [4, 5, 6]])
    normalized = self.model_l1.normalize(ndarray_matrix)
    self.assertTrue(isinstance(normalized, np.ndarray))
    expected = np.array([
        [0.04761905, 0.0, 0.0952381],
        [0.0, 0.0, 0.14285714],
        [0.19047619, 0.23809524, 0.28571429],
    ])
    self.assertTrue(np.allclose(normalized, expected))
    self.assertRaises(ValueError, lambda model, doc: model.normalize(doc), self.model_l1, [1, 2, 3])
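# The expected matrices above follow from normalizing by the norm of the whole
# matrix rather than per row: in the l1 case every entry is divided by the sum
# of absolute values (1+2+3+4+5+6 = 21, so 1/21 ~= 0.04761905), and in the l2
# case below by the Frobenius norm (sqrt(1+4+9+16+25+36) = sqrt(91), so
# 1/sqrt(91) ~= 0.10482848). A sketch of that computation:
dense = np.array([[1, 0, 2], [0, 0, 3], [4, 5, 6]], dtype=float)
l1_normalized = dense / np.abs(dense).sum()    # matches the l1 expectations
l2_normalized = dense / np.linalg.norm(dense)  # matches the l2 expectations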
def test_tupleInput_l2(self):
    """Test tuple input for l2 transformation."""
    normalized = self.model_l2.normalize(self.doc)
    expected = [(1, 0.4082482904638631), (5, 0.8164965809277261), (8, 0.4082482904638631)]
    self.assertTrue(np.allclose(normalized, expected))

def test_sparseCSRInput_l2(self):
    """Test sparse csr matrix input for l2 transformation."""
    row = np.array([0, 0, 1, 2, 2, 2])
    col = np.array([0, 2, 2, 0, 1, 2])
    data = np.array([1, 2, 3, 4, 5, 6])
    sparse_matrix = csr_matrix((data, (row, col)), shape=(3, 3))
    normalized = self.model_l2.normalize(sparse_matrix)
    self.assertTrue(issparse(normalized))
    expected = np.array([
        [0.10482848, 0.0, 0.20965697],
        [0.0, 0.0, 0.31448545],
        [0.41931393, 0.52414242, 0.6289709],
    ])
    self.assertTrue(np.allclose(normalized.toarray(), expected))

def test_numpyndarrayInput_l2(self):
    """Test np ndarray input for l2 transformation."""
    ndarray_matrix = np.array([[1, 0, 2], [0, 0, 3], [4, 5, 6]])
    normalized = self.model_l2.normalize(ndarray_matrix)
    self.assertTrue(isinstance(normalized, np.ndarray))
    expected = np.array([
        [0.10482848, 0.0, 0.20965697],
        [0.0, 0.0, 0.31448545],
        [0.41931393, 0.52414242, 0.6289709],
    ])
    self.assertTrue(np.allclose(normalized, expected))
    self.assertRaises(ValueError, lambda model, doc: model.normalize(doc), self.model_l2, [1, 2, 3])

def testInit(self):
    """Test if error messages raised on unsupported norm."""
    self.assertRaises(ValueError, normmodel.NormModel, self.corpus, 'l0')

def model_sanity(self, model):
    """Even tiny models trained on any corpus should pass these sanity checks."""
    self.assertEqual(model.wv.syn0.shape, (len(model.wv.vocab), model.vector_size))
    self.assertEqual(model.wv.syn0_all.shape, (model.num_ngram_vectors, model.vector_size))
def testTraining(self):
    """Test self.test_model successfully trained, parameters and weights correctly loaded."""
    if self.ft_path is None:
        logger.info('FT_HOME env variable not set, skipping test')
        return
    vocab_size, model_size = 1763, 10
    trained_model = fasttext.FastText.train(self.ft_path, self.corpus_file, size=model_size, output_file=testfile())
    self.assertEqual(trained_model.wv.syn0.shape, (vocab_size, model_size))
    self.assertEqual(len(trained_model.wv.vocab), vocab_size)
    self.assertEqual(trained_model.wv.syn0_all.shape[1], model_size)
    self.model_sanity(trained_model)
    self.assertFalse(os.path.exists('%s.bin' % testfile()))

def testMinCount(self):
    """Tests words with frequency less than `min_count` absent from vocab."""
    if self.ft_path is None:
        logger.info('FT_HOME env variable not set, skipping test')
        return
    test_model_min_count_5 = fasttext.FastText.train(self.ft_path, self.corpus_file, output_file=testfile(), size=10, min_count=5)
    self.assertTrue('forests' not in test_model_min_count_5.wv.vocab)
    test_model_min_count_1 = fasttext.FastText.train(self.ft_path, self.corpus_file, output_file=testfile(), size=10, min_count=1)
    self.assertTrue('forests' in test_model_min_count_1.wv.vocab)

def testModelSize(self):
    """Tests output vector dimensions are the same as the value for `size` param."""
    if self.ft_path is None:
        logger.info('FT_HOME env variable not set, skipping test')
        return
    test_model_size_20 = fasttext.FastText.train(self.ft_path, self.corpus_file, output_file=testfile(), size=20)
    self.assertEqual(test_model_size_20.vector_size, 20)
    self.assertEqual(test_model_size_20.wv.syn0.shape[1], 20)
    self.assertEqual(test_model_size_20.wv.syn0_all.shape[1], 20)

def testPersistence(self):
    """Test storing/loading the entire model."""
    self.test_model.save(testfile())
    loaded = fasttext.FastText.load(testfile())
    self.models_equal(self.test_model, loaded)
    self.test_model.save(testfile(), sep_limit=0)
    self.models_equal(self.test_model, fasttext.FastText.load(testfile()))

def testNormalizedVectorsNotSaved(self):
    """Test syn0norm/syn0_all_norm aren't saved in model file."""
    self.test_model.init_sims()
    self.test_model.save(testfile())
    loaded = fasttext.FastText.load(testfile())
    self.assertTrue(loaded.wv.syn0norm is None)
    self.assertTrue(loaded.wv.syn0_all_norm is None)
    wv = self.test_model.wv
    wv.save(testfile())
    loaded_kv = keyedvectors.KeyedVectors.load(testfile())
    self.assertTrue(loaded_kv.syn0norm is None)
    self.assertTrue(loaded_kv.syn0_all_norm is None)
def testLoadFastTextFormat(self):
    """Test model successfully loaded from fastText .bin file."""
    try:
        model = fasttext.FastText.load_fasttext_format(self.test_model_file)
    except Exception as exc:
        self.fail('Unable to load FastText model from file %s: %s' % (self.test_model_file, exc))
    vocab_size, model_size = 1762, 10
    self.assertEqual(model.wv.syn0.shape, (vocab_size, model_size))
    self.assertEqual(len(model.wv.vocab), vocab_size)
    self.assertEqual(model.wv.syn0_all.shape, (model.num_ngram_vectors, model_size))
    expected_vec = [-0.57144, -0.0085561, 0.15748, -0.67855, -0.25459, -0.58077, -0.09913, 1.1447, 0.23418, 0.060007]
    self.assertTrue(numpy.allclose(model['hundred'], expected_vec, atol=1e-04))
    expected_vec_oov = [-0.23825, -0.58482, -0.22276, -0.41215, 0.91015, -1.6786, -0.26724, 0.58818, 0.57828, 0.75801]
    self.assertTrue(numpy.allclose(model['rejection'], expected_vec_oov, atol=1e-04))
    self.assertEqual(model.min_count, 5)
    self.assertEqual(model.window, 5)
    self.assertEqual(model.iter, 5)
    self.assertEqual(model.negative, 5)
    self.assertEqual(model.sample, 0.0001)
    self.assertEqual(model.bucket, 1000)
    self.assertEqual(model.wv.max_n, 6)
    self.assertEqual(model.wv.min_n, 3)
    self.model_sanity(model)

def testLoadFastTextNewFormat(self):
    """Test model successfully loaded from fastText (new format) .bin file."""
    try:
        new_model = fasttext.FastText.load_fasttext_format(self.test_new_model_file)
    except Exception as exc:
        self.fail('Unable to load FastText model from file %s: %s' % (self.test_new_model_file, exc))
    vocab_size, model_size = 1763, 10
    self.assertEqual(new_model.wv.syn0.shape, (vocab_size, model_size))
    self.assertEqual(len(new_model.wv.vocab), vocab_size)
    self.assertEqual(new_model.wv.syn0_all.shape, (new_model.num_ngram_vectors, model_size))
    expected_vec = [-0.025627, -0.11448, 0.18116, -0.96779, 0.2532, -0.93224, 0.3929, 0.12679, -0.19685, -0.13179]
    self.assertTrue(numpy.allclose(new_model['hundred'], expected_vec, atol=1e-04))
    expected_vec_oov = [-0.53378, -0.19, 0.013482, -0.86767, -0.21684, -0.89928, 0.45124, 0.18025, -0.14128, 0.22508]
    self.assertTrue(numpy.allclose(new_model['rejection'], expected_vec_oov, atol=1e-04))
    self.assertEqual(new_model.min_count, 5)
    self.assertEqual(new_model.window, 5)
    self.assertEqual(new_model.iter, 5)
    self.assertEqual(new_model.negative, 5)
    self.assertEqual(new_model.sample, 0.0001)
    self.assertEqual(new_model.bucket, 1000)
    self.assertEqual(new_model.wv.max_n, 6)
    self.assertEqual(new_model.wv.min_n, 3)
    self.model_sanity(new_model)

def testLoadFileName(self):
    """Test model accepts input as both `/path/to/model` or `/path/to/model.bin`."""
    self.assertTrue(fasttext.FastText.load_fasttext_format(datapath('lee_fasttext_new')))
    self.assertTrue(fasttext.FastText.load_fasttext_format(datapath('lee_fasttext_new.bin')))
def testLoadModelWithNonAsciiVocab(self):
    """Test loading model with non-ascii words in vocab."""
    model = fasttext.FastText.load_fasttext_format(datapath('non_ascii_fasttext'))
    self.assertTrue(u'kter\xfd' in model)
    try:
        vector = model[u'kter\xfd']
    except UnicodeDecodeError:
        self.fail('Unable to access vector for utf8 encoded non-ascii word')

def testLoadModelNonUtf8Encoding(self):
    """Test loading model with words in user-specified encoding."""
    model = fasttext.FastText.load_fasttext_format(datapath('cp852_fasttext'), encoding='cp852')
    self.assertTrue(u'kter\xfd' in model)
    try:
        vector = model[u'kter\xfd']
    except KeyError:
        self.fail('Unable to access vector for cp-852 word')

def testNSimilarity(self):
    """Test n_similarity for in-vocab and out-of-vocab words."""
    self.assertTrue(numpy.allclose(self.test_model.n_similarity(['the', 'and'], ['and', 'the']), 1.0))
    self.assertEqual(self.test_model.n_similarity(['the'], ['and']), self.test_model.n_similarity(['and'], ['the']))
    self.assertTrue(numpy.allclose(self.test_model.n_similarity(['night', 'nights'], ['nights', 'night']), 1.0))
    self.assertEqual(self.test_model.n_similarity(['night'], ['nights']), self.test_model.n_similarity(['nights'], ['night']))

def testSimilarity(self):
    """Test similarity for in-vocab and out-of-vocab words."""
    self.assertTrue(numpy.allclose(self.test_model.similarity('the', 'the'), 1.0))
    self.assertEqual(self.test_model.similarity('the', 'and'), self.test_model.similarity('and', 'the'))
    self.assertTrue(numpy.allclose(self.test_model.similarity('nights', 'nights'), 1.0))
    self.assertEqual(self.test_model.similarity('night', 'nights'), self.test_model.similarity('nights', 'night'))

def testMostSimilar(self):
    """Test most_similar for in-vocab and out-of-vocab words."""
    self.assertEqual(len(self.test_model.most_similar(positive=['the', 'and'], topn=5)), 5)
    self.assertEqual(self.test_model.most_similar('the'), self.test_model.most_similar(positive=['the']))
    self.assertEqual(len(self.test_model.most_similar(['night', 'nights'], topn=5)), 5)
    self.assertEqual(self.test_model.most_similar('nights'), self.test_model.most_similar(positive=['nights']))

def testMostSimilarCosmul(self):
    """Test most_similar_cosmul for in-vocab and out-of-vocab words."""
    self.assertEqual(len(self.test_model.most_similar_cosmul(positive=['the', 'and'], topn=5)), 5)
    self.assertEqual(self.test_model.most_similar_cosmul('the'), self.test_model.most_similar_cosmul(positive=['the']))
    self.assertEqual(len(self.test_model.most_similar_cosmul(['night', 'nights'], topn=5)), 5)
    self.assertEqual(self.test_model.most_similar_cosmul('nights'), self.test_model.most_similar_cosmul(positive=['nights']))

def testLookup(self):
    """Tests word vector lookup for in-vocab and out-of-vocab words."""
    self.assertTrue('night' in self.test_model.wv.vocab)
    self.assertTrue(numpy.allclose(self.test_model['night'], self.test_model[['night']]))
    self.assertFalse('nights' in self.test_model.wv.vocab)
    self.assertTrue(numpy.allclose(self.test_model['nights'], self.test_model[['nights']]))
    self.assertRaises(KeyError, lambda: self.test_model['a!@'])

def testContains(self):
    """Tests __contains__ for in-vocab and out-of-vocab words."""
    self.assertTrue('night' in self.test_model.wv.vocab)
    self.assertTrue('night' in self.test_model)
    self.assertFalse('nights' in self.test_model.wv.vocab)
    self.assertTrue('nights' in self.test_model)
    self.assertFalse('a!@' in self.test_model.wv.vocab)
    self.assertFalse('a!@' in self.test_model)

def testWmdistance(self):
    """Tests wmdistance for docs with in-vocab and out-of-vocab words."""
    doc = ['night', 'payment']
    oov_doc = ['nights', 'forests', 'payments']
    ngrams_absent_doc = ['a!@', 'b#$']
    dist = self.test_model.wmdistance(doc, oov_doc)
    self.assertNotEqual(float('inf'), dist)
    dist = self.test_model.wmdistance(doc, ngrams_absent_doc)
    self.assertEqual(float('inf'), dist)
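# fastText can compose vectors for out-of-vocabulary words from their character
# ngrams, which is why oov_doc above still yields a finite distance; the
# ngrams_absent_doc words share no ngrams with the training vocabulary, so no
# vectors exist to compare and wmdistance falls back to an infinite distance.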
def testDoesntMatch(self):
    """Tests doesnt_match for list of out-of-vocab words."""
    oov_words = ['nights', 'forests', 'payments']
    for word in oov_words:
        self.assertFalse(word in self.test_model.wv.vocab)
    try:
        self.test_model.doesnt_match(oov_words)
    except Exception:
        self.fail('model.doesnt_match raises exception for oov words')

def test_saveAsText(self):
    """`HashDictionary` can be saved as textfile."""
    tmpf = get_tmpfile('dict_test.txt')
    d = HashDictionary([
        '\xc5\xbelo\xc5\xa5ou\xc4\x8dk\xc3\xbd kon\xc3\xad\xc4\x8dek'.split(),
        '\xd0\x9c\xd0\xb0\xd0\xbb\xd0\xb9\xd0\xb6 \xd0\xbe\xd0\xb1\xd0\xbb\xd1\x8c\xd0\xb9\xd0\xba\xd0\xb2\xd1\x8e\xd1\x8d \xd0\xb0\xd1\x82 \xd1\x8d\xd0\xb6\xd1\x82'.split(),
    ])
    d.save_as_text(tmpf)
    self.assertTrue(os.path.exists(tmpf))

def test_saveAsTextBz2(self):
    """`HashDictionary` can be saved & loaded as compressed pickle."""
    tmpf = get_tmpfile('dict_test.txt.bz2')
    d = HashDictionary([
        '\xc5\xbelo\xc5\xa5ou\xc4\x8dk\xc3\xbd kon\xc3\xad\xc4\x8dek'.split(),
        '\xd0\x9c\xd0\xb0\xd0\xbb\xd0\xb9\xd0\xb6 \xd0\xbe\xd0\xb1\xd0\xbb\xd1\x8c\xd0\xb9\xd0\xba\xd0\xb2\xd1\x8e\xd1\x8d \xd0\xb0\xd1\x82 \xd1\x8d\xd0\xb6\xd1\x82'.split(),
    ])
    d.save(tmpf)
    self.assertTrue(os.path.exists(tmpf))
    d2 = d.load(tmpf)
    self.assertEqual(len(d), len(d2))

def testPBooleanDocument(self):
    """Test p_boolean_document()."""
    accumulator = probability_estimation.p_boolean_document(self.corpus, self.segmented_topics)
    obtained = accumulator.index_to_dict()
    expected = {self.graph_id: {5}, self.user_id: {1, 3}, self.system_id: {1, 2}, self.computer_id: {0}}
    self.assertEqual(expected, obtained)

def testPBooleanSlidingWindow(self):
    """Test p_boolean_sliding_window()."""
    accumulator = probability_estimation.p_boolean_sliding_window(self.texts, self.segmented_topics, self.dictionary, 2)
    self.assertEqual(1, accumulator[self.computer_id])
    self.assertEqual(3, accumulator[self.user_id])
    self.assertEqual(1, accumulator[self.graph_id])
    self.assertEqual(4, accumulator[self.system_id])

def test_save_load(self):
    """Test loading/saving LdaVowpalWabbit model."""
    if not self.vw_path:
        return
    lda = LdaVowpalWabbit(
        self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
        id2word=self.dictionary, cleanup_files=True, alpha=0.1, eta=0.1,
        num_topics=len(TOPIC_WORDS), random_seed=1,
    )
    with tempfile.NamedTemporaryFile() as fhandle:
        lda.save(fhandle.name)
        lda2 = LdaVowpalWabbit.load(fhandle.name)
        saved_fields = [
            lda.alpha, lda.chunksize, lda.cleanup_files, lda.decay, lda.eta,
            lda.gamma_threshold, lda.id2word, lda.num_terms, lda.num_topics,
            lda.passes, lda.random_seed, lda.vw_path,
        ]
        loaded_fields = [
            lda2.alpha, lda2.chunksize, lda2.cleanup_files, lda2.decay, lda2.eta,
            lda2.gamma_threshold, lda2.id2word, lda2.num_terms, lda2.num_topics,
            lda2.passes, lda2.random_seed, lda2.vw_path,
        ]
        self.assertEqual(saved_fields, loaded_fields)
        saved_topics = lda.show_topics(num_topics=5, num_words=10)
        loaded_topics = lda2.show_topics(num_topics=5, num_words=10)
        self.assertEqual(loaded_topics, saved_topics)

def test_model_update(self):
    """Test updating existing LdaVowpalWabbit model."""
    if not self.vw_path:
        return
    lda = LdaVowpalWabbit(
        self.vw_path, corpus=[self.corpus[0]], passes=10, chunksize=256,
        id2word=self.dictionary, cleanup_files=True, alpha=0.1, eta=0.1,
        num_topics=len(TOPIC_WORDS), random_seed=1,
    )
    lda.update(self.corpus[1:])
    result = lda.log_perplexity(self.corpus)
    self.assertTrue(result < -1)
    self.assertTrue(result > -5)

def test_perplexity(self):
    """Test LdaVowpalWabbit perplexity is within expected range."""
    if not self.vw_path:
        return
    lda = LdaVowpalWabbit(
        self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
        id2word=self.dictionary, cleanup_files=True, alpha=0.1, eta=0.1,
        num_topics=len(TOPIC_WORDS), random_seed=1,
    )
    result = lda.log_perplexity(self.corpus)
    self.assertTrue(result < -1)
    self.assertTrue(result > -5)

def test_topic_coherence(self):
    """Test LdaVowpalWabbit topic coherence."""
    if not self.vw_path:
        return
    corpus, dictionary = get_corpus()
    lda = LdaVowpalWabbit(
        self.vw_path, corpus=corpus, passes=10, chunksize=256,
        id2word=dictionary, cleanup_files=True, alpha=0.1, eta=0.1,
        num_topics=len(TOPIC_WORDS), random_seed=1,
    )
    lda.print_topics(5, 10)
    topic_map = {}
    for i, words in enumerate(TOPIC_WORDS):
        topic_map[frozenset(words)] = i
    n_coherent = 0
    for topic_id in range(lda.num_topics):
        topic = lda.show_topic(topic_id, topn=20)
        topic_words = [w[1] for w in topic]
        ids = []
        for word in topic_words:
            for src_topic_words, src_topic_id in six.iteritems(topic_map):
                if word in src_topic_words:
                    ids.append(src_topic_id)
        counts = defaultdict(int)
        for found_topic_id in ids:
            counts[found_topic_id] += 1
        max_count = 0
        for count in six.itervalues(counts):
            max_count = max(max_count, count)
        if max_count >= 6:
            n_coherent += 1
    self.assertTrue(n_coherent >= 3)

def test_corpus_to_vw(self):
    """Test corpus to Vowpal Wabbit format conversion."""
    if not self.vw_path:
        return
    corpus = [[(0, 5), (7, 1), (5, 3), (0, 2)], [(7, 2), (2, 1), (3, 11)], [(1, 1)], [], [(5, 2), (0, 1)]]
    expected = '\n| 0:5 7:1 5:3 0:2\n| 7:2 2:1 3:11\n| 1:1\n|\n| 5:2 0:1\n'.strip()
    result = '\n'.join(ldavowpalwabbit.corpus_to_vw(corpus))
    self.assertEqual(result, expected)

def testvwmodel2ldamodel(self):
    """Test copying of VWModel to LdaModel."""
    if not self.vw_path:
        return
    tm1 = LdaVowpalWabbit(vw_path=self.vw_path, corpus=self.corpus, num_topics=2, id2word=self.dictionary)
    tm2 = ldavowpalwabbit.vwmodel2ldamodel(tm1)
    for document in self.corpus:
        element1_1, element1_2 = tm1[document][0]
        element2_1, element2_2 = tm2[document][0]
        self.assertAlmostEqual(element1_1, element2_1)
        self.assertAlmostEqual(element1_2, element2_2, 5)
        logging.debug('%d %d', element1_1, element2_1)
        logging.debug('%d %d', element1_2, element2_2)

def testCosineSimilarity(self):
    """Test cosine_similarity()."""
    obtained = indirect_confirmation_measure.cosine_similarity(self.segmentation, self.accumulator, self.topics, self.measure, self.gamma)
    expected = (0.623 + 0.623) / 2.0
    self.assertAlmostEqual(expected, obtained[0], 4)
def test_persistence(self):
    """Test storing/loading the entire model."""
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
    model.save(testfile())
    self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))

def testPersistenceWord2VecFormat(self):
    """Test storing the entire model in word2vec format."""
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
    test_doc_word = os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.dw')
    model.save_word2vec_format(test_doc_word, doctag_vec=True, word_vec=True, binary=True)
    binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc_word, binary=True)
    self.assertEqual(len(model.wv.vocab) + len(model.docvecs), len(binary_model_dv.vocab))
    test_doc = os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.d')
    model.save_word2vec_format(test_doc, doctag_vec=True, word_vec=False, binary=True)
    binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc, binary=True)
    self.assertEqual(len(model.docvecs), len(binary_model_dv.vocab))
    test_word = os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.w')
    model.save_word2vec_format(test_word, doctag_vec=False, word_vec=True, binary=True)
    binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_word, binary=True)
    self.assertEqual(len(model.wv.vocab), len(binary_model_dv.vocab))

def test_load_mmap(self):
    """Test storing/loading the entire model."""
    model = doc2vec.Doc2Vec(sentences, min_count=1)
    model.save(testfile(), sep_limit=0)
    self.models_equal(model, doc2vec.Doc2Vec.load(testfile()))
    self.models_equal(model, doc2vec.Doc2Vec.load(testfile(), mmap='r'))

def test_int_doctags(self):
    """Test doc2vec doctag alternatives."""
    corpus = DocsLeeCorpus()
    model = doc2vec.Doc2Vec(min_count=1)
    model.build_vocab(corpus)
    self.assertEqual(len(model.docvecs.doctag_syn0), 300)
    self.assertEqual(model.docvecs[0].shape, (100,))
    self.assertEqual(model.docvecs[np.int64(0)].shape, (100,))
    self.assertRaises(KeyError, model.__getitem__, '_*0')

def test_missing_string_doctag(self):
    """Test doc2vec doctag alternatives."""
    corpus = list(DocsLeeCorpus(True))
    corpus = corpus[0:10] + corpus
    model = doc2vec.Doc2Vec(min_count=1)
    model.build_vocab(corpus)
    self.assertRaises(KeyError, model.docvecs.__getitem__, 'not_a_tag')

def test_string_doctags(self):
    """Test doc2vec doctag alternatives."""
    corpus = list(DocsLeeCorpus(True))
    corpus = corpus[0:10] + corpus
    model = doc2vec.Doc2Vec(min_count=1)
    model.build_vocab(corpus)
    self.assertEqual(len(model.docvecs.doctag_syn0), 300)
    self.assertEqual(model.docvecs[0].shape, (100,))
    self.assertEqual(model.docvecs['_*0'].shape, (100,))
    self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
    self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
    self.assertTrue(max(model.docvecs._int_index(str_key) for str_key in model.docvecs.doctags.keys()) < len(model.docvecs.doctag_syn0))
    self.assertEqual(model.docvecs.offset2doctag[0], model.docvecs.most_similar([model.docvecs[0]])[0][0])

def test_similarity_unseen_docs(self):
    """Test similarity of out of training sentences."""
    rome_str = ['rome', 'italy']
    car_str = ['car']
    corpus = list(DocsLeeCorpus(True))
    model = doc2vec.Doc2Vec(min_count=1)
    model.build_vocab(corpus)
    self.assertTrue(
        model.docvecs.similarity_unseen_docs(model, rome_str, rome_str)
        > model.docvecs.similarity_unseen_docs(model, rome_str, car_str)
    )
def model_sanity(self, model, keep_training=True):
    """Any non-trivial model on DocsLeeCorpus can pass these sanity checks."""
    fire1 = 0
    fire2 = np.int64(8)
    tennis1 = 6
    doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
    sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
    f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
    self.assertLess(f_rank, 10)
    sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
    f2_rank = [docid for docid, sim in sims].index(fire2)
    self.assertLess(f2_rank, 30)
    doc0_vec = model.docvecs[fire1]
    sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
    sims2 = [(doc_id, sim) for doc_id, sim in sims2 if doc_id != fire1]
    sims = sims[:20]
    self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0])
    self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1]))
    clip_sims = model.docvecs.most_similar(fire1, clip_start=len(model.docvecs) // 2, clip_end=len(model.docvecs) * 2 // 3)
    sims_doc_id = [docid for docid, sim in clip_sims]
    for s_id in sims_doc_id:
        self.assertTrue(len(model.docvecs) // 2 <= s_id <= len(model.docvecs) * 2 // 3)
    self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
    self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
    if keep_training:
        model.save(testfile())
        loaded = doc2vec.Doc2Vec.load(testfile())
        loaded.train(sentences, total_examples=loaded.corpus_count, epochs=loaded.iter)
def test_training(self):
    """Test doc2vec training."""
    corpus = DocsLeeCorpus()
    model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20, workers=1)
    model.build_vocab(corpus)
    self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
    model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
    self.model_sanity(model)
    model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20, workers=1)
    self.models_equal(model, model2)

def test_dbow_hs(self):
    """Test DBOW doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20)
    self.model_sanity(model)

def test_dmm_hs(self):
    """Test DM/mean doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)

def test_dms_hs(self):
    """Test DM/sum doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)

def test_dmc_hs(self):
    """Test DM/concatenate doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)

def test_dbow_neg(self):
    """Test DBOW doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=0, negative=10, min_count=2, iter=20)
    self.model_sanity(model)

def test_dmm_neg(self):
    """Test DM/mean doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0, negative=10, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)

def test_dms_neg(self):
    """Test DM/sum doc2vec training."""
    model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0, negative=10, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)