def test_dmc_neg(self):
    """Test DM/concatenate doc2vec training."""
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_concat=1, size=24, window=4,
        hs=0, negative=10, alpha=0.05, min_count=2, iter=20)
    self.model_sanity(model)

def test_parallel(self):
    """Test doc2vec parallel training."""
    if doc2vec.FAST_VERSION < 0:  # skip if the optimized (compiled) routines are unavailable
        return
    corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)
    for workers in [2, 4]:
        model = doc2vec.Doc2Vec(corpus, workers=workers)
        self.model_sanity(model)

def test_deterministic_hs(self):
    """Test doc2vec results identical with identical RNG seed."""
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
    model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
    self.models_equal(model, model2)

def test_deterministic_neg(self):
    """Test doc2vec results identical with identical RNG seed."""
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
    model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
    self.models_equal(model, model2)

def test_deterministic_dmc(self):
    """Test doc2vec results identical with identical RNG seed."""
    model = doc2vec.Doc2Vec(
        DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4,
        hs=1, negative=3, seed=42, workers=1)
    model2 = doc2vec.Doc2Vec(
        DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4,
        hs=1, negative=3, seed=42, workers=1)
    self.models_equal(model, model2)
def test_mixed_tag_types(self):
    """Ensure alternating int/string tags don't share indexes in doctag_syn0."""
    mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]])
                        for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec()
    model.build_vocab(mixed_tag_corpus)
    expected_length = len(sentences) + len(model.docvecs.doctags)
    self.assertEqual(len(model.docvecs.doctag_syn0), expected_length)

def test_delete_temporary_training_data(self):
    """Test doc2vec model after delete_temporary_training_data."""
    for i in [0, 1]:
        for j in [0, 1]:
            model = doc2vec.Doc2Vec(sentences, size=5, min_count=1, window=4, hs=i, negative=j)
            if i:
                self.assertTrue(hasattr(model, 'syn1'))
            if j:
                self.assertTrue(hasattr(model, 'syn1neg'))
            self.assertTrue(hasattr(model, 'syn0_lockf'))
            model.delete_temporary_training_data(keep_doctags_vectors=False, keep_inference=False)
            self.assertTrue(len(model['human']), 10)
            self.assertTrue(model.wv.vocab['graph'].count, 5)
            self.assertTrue(not hasattr(model, 'syn1'))
            self.assertTrue(not hasattr(model, 'syn1neg'))
            self.assertTrue(not hasattr(model, 'syn0_lockf'))
            self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0'))
            self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0_lockf'))
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4,
        hs=1, negative=0, alpha=0.05, min_count=2, iter=20)
    model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
    self.assertTrue(model.docvecs and hasattr(model.docvecs, 'doctag_syn0'))
    self.assertTrue(hasattr(model, 'syn1'))
    self.model_sanity(model, keep_training=False)
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4,
        hs=0, negative=1, alpha=0.05, min_count=2, iter=20)
    model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
    self.model_sanity(model, keep_training=False)
    self.assertTrue(hasattr(model, 'syn1neg'))
@log_capture()
def testBuildVocabWarning(self, l):
    """Test if logger warning is raised on non-ideal input to a doc2vec model."""
    raw_sentences = ['human', 'machine']
    sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec()
    model.build_vocab(sentences)
    warning = "Each 'words' should be a list of words (usually unicode strings)."
    self.assertTrue(warning in str(l))

@log_capture()
def testTrainWarning(self, l):
    """Test if warning is raised if alpha rises during subsequent calls to train()."""
    raw_sentences = [['human'], ['graph', 'trees']]
    sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec(alpha=0.025, min_alpha=0.025, min_count=1, workers=8, size=5)
    model.build_vocab(sentences)
    for epoch in range(10):
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        model.alpha -= 0.002
        model.min_alpha = model.alpha
        if epoch == 5:
            model.alpha += 0.05  # deliberately raise alpha to trigger the warning
    warning = "Effective 'alpha' higher than previous training cycles"
    self.assertTrue(warning in str(l))

def testLoadOnClassError(self):
    """Test if exception is raised when loading doc2vec model on instance."""
    self.assertRaises(AttributeError, load_on_instance)
def testArithmeticMean(self):
    """Test arithmetic_mean()."""
    obtained = aggregation.arithmetic_mean(self.confirmed_measures)
    expected = 2.75
    self.assertEqual(obtained, expected)
def testEmptyInputsOnBigramConstruction(self):
    """Test that empty inputs don't throw errors and return the expected result."""
    self.assertEqual(list(self.bigram_default[[]]), [])
    self.assertEqual(list(self.bigram_default[iter(())]), [])
    self.assertEqual(list(self.bigram_default[[[], []]]), [[], []])
    self.assertEqual(list(self.bigram_default[iter([[], []])]), [[], []])
    self.assertEqual(list(self.bigram_default[(iter(()) for i in range(2))]), [[], []])

def testSentenceGeneration(self):
    """Test basic bigram using a dummy corpus."""
    # the transformed corpus has the same number of sentences as the input
    self.assertEqual(len(sentences), len(list(self.bigram_default[sentences])))

def testSentenceGenerationWithGenerator(self):
    """Test basic bigram production when corpus is a generator."""
    self.assertEqual(len(list(gen_sentences())), len(list(self.bigram_default[gen_sentences()])))

def testBigramConstruction(self):
    """Test Phrases bigram construction building."""
    bigram1_seen = False
    bigram2_seen = False
    for s in self.bigram[sentences]:
        if not bigram1_seen and u'response_time' in s:
            bigram1_seen = True
        if not bigram2_seen and u'graph_minors' in s:
            bigram2_seen = True
        if bigram1_seen and bigram2_seen:
            break
    self.assertTrue(bigram1_seen and bigram2_seen)
    # check the same bigrams again, this time transforming one sentence at a time
    self.assertTrue(u'response_time' in self.bigram[sentences[1]])
    self.assertTrue(u'response_time' in self.bigram[sentences[4]])
    self.assertTrue(u'graph_minors' in self.bigram[sentences[-2]])
    self.assertTrue(u'graph_minors' in self.bigram[sentences[-1]])
    self.assertTrue(u'human_interface' in self.bigram[sentences[-1]])

def testBigramConstructionFromGenerator(self):
    """Test Phrases bigram construction building when corpus is a generator."""
    bigram1_seen = False
    bigram2_seen = False
    for s in self.bigram[gen_sentences()]:
        if not bigram1_seen and 'response_time' in s:
            bigram1_seen = True
        if not bigram2_seen and 'graph_minors' in s:
            bigram2_seen = True
        if bigram1_seen and bigram2_seen:
            break
    self.assertTrue(bigram1_seen and bigram2_seen)

def testEncoding(self):
    """Test that both utf8 and unicode input work; output must be unicode."""
    expected = [u'survey', u'user', u'computer', u'system', u'response_time']
    self.assertEqual(self.bigram_utf8[sentences[1]], expected)
    self.assertEqual(self.bigram_unicode[sentences[1]], expected)
    transformed = ' '.join(self.bigram_utf8[sentences[1]])
    self.assertTrue(isinstance(transformed, unicode))
def testExportPhrases(self):
    """Test Phrases bigram export_phrases functionality."""
    bigram = Phrases(sentences, min_count=1, threshold=1)
    seen_bigrams = set()
    for phrase, score in bigram.export_phrases(sentences):
        seen_bigrams.add(phrase)
    assert seen_bigrams == set(['response time', 'graph minors', 'human interface'])

def testMultipleBigramsSingleEntry(self):
    """A single entry should produce multiple bigrams."""
    bigram = Phrases(sentences, min_count=1, threshold=1)
    seen_bigrams = set()
    test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
    for phrase, score in bigram.export_phrases(test_sentences):
        seen_bigrams.add(phrase)
    assert seen_bigrams == set(['graph minors', 'human interface'])

def testScoringDefault(self):
    """Test the default scoring, from the Mikolov word2vec paper."""
    bigram = Phrases(sentences, min_count=1, threshold=1)
    seen_scores = set()
    test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
    for phrase, score in bigram.export_phrases(test_sentences):
        seen_scores.add(round(score, 3))
    assert seen_scores == set([5.167, 3.444])

def testScoringNpmi(self):
    """Test normalized pointwise mutual information scoring."""
    bigram = Phrases(sentences, min_count=1, threshold=0.5, scoring='npmi')
    seen_scores = set()
    test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
    for phrase, score in bigram.export_phrases(test_sentences):
        seen_scores.add(round(score, 3))
    assert seen_scores == set([0.882, 0.714])
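For context on the two scoring tests above, here is a minimal sketch of the NPMI formula that the 'npmi' scorer is based on. The helper name and counts are illustrative assumptions; gensim's internal scorer also applies min_count handling, so this sketch is not claimed to reproduce the exact 0.882/0.714 values.

from math import log

def npmi(count_a, count_b, count_ab, corpus_word_count):
    # NPMI(a, b) = ln(P(a,b) / (P(a) * P(b))) / -ln(P(a,b)), bounded in [-1, 1]
    p_a = count_a / float(corpus_word_count)
    p_b = count_b / float(corpus_word_count)
    p_ab = count_ab / float(corpus_word_count)
    return log(p_ab / (p_a * p_b)) / -log(p_ab)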
def testBadParameters(self):
    """Test the phrases module with bad parameters."""
    self.assertRaises(ValueError, Phrases, sentences, min_count=0)
    self.assertRaises(ValueError, Phrases, sentences, threshold=-1)

def testPruning(self):
    """Test that max_vocab_size parameter is respected."""
    bigram = Phrases(sentences, max_vocab_size=5)
    self.assertTrue(len(bigram.vocab) <= 5)
def setUp(self):
    """Set up Phraser models for the tests."""
    bigram_phrases = Phrases(sentences, min_count=1, threshold=1)
    self.bigram = Phraser(bigram_phrases)
    bigram_default_phrases = Phrases(sentences)
    self.bigram_default = Phraser(bigram_default_phrases)
    bigram_utf8_phrases = Phrases(sentences, min_count=1, threshold=1)
    self.bigram_utf8 = Phraser(bigram_utf8_phrases)
    bigram_unicode_phrases = Phrases(unicode_sentences, min_count=1, threshold=1)
    self.bigram_unicode = Phraser(bigram_unicode_phrases)
def check_coherence_measure(self, coherence):
    """Check provided topic coherence algorithm on given topics."""
    if coherence in boolean_document_based:
        kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)
    else:
        kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)
    cm1 = CoherenceModel(topics=self.topics1, **kwargs)
    cm2 = CoherenceModel(topics=self.topics2, **kwargs)
    self.assertGreater(cm1.get_coherence(), cm2.get_coherence())

def testUMass(self):
    """Test U_Mass topic coherence algorithm on given topics."""
    self.check_coherence_measure('u_mass')

def testCv(self):
    """Test C_v topic coherence algorithm on given topics."""
    self.check_coherence_measure('c_v')

def testCuci(self):
    """Test C_uci topic coherence algorithm on given topics."""
    self.check_coherence_measure('c_uci')

def testCnpmi(self):
    """Test C_npmi topic coherence algorithm on given topics."""
    self.check_coherence_measure('c_npmi')
def testUMassLdaModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA Model."""
    CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')

def testCvLdaModel(self):
    """Perform sanity check to see if c_v coherence works with LDA Model."""
    CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')

def testCuciLdaModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA Model."""
    CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')

def testCnpmiLdaModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA Model."""
    CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')
def testUMassMalletModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        return
    CoherenceModel(model=self.malletmodel, corpus=self.corpus, coherence='u_mass')

def testCvMalletModel(self):
    """Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        return
    CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_v')

def testCuciMalletModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        return
    CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_uci')

def testCnpmiMalletModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper."""
    if not self.mallet_path:
        return
    CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_npmi')

def testUMassVWModel(self):
    """Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        return
    CoherenceModel(model=self.vwmodel, corpus=self.corpus, coherence='u_mass')

def testCvVWModel(self):
    """Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        return
    CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_v')

def testCuciVWModel(self):
    """Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        return
    CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_uci')

def testCnpmiVWModel(self):
    """Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper."""
    if not self.vw_path:
        return
    CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_npmi')
def testErrors(self):
    """Test if errors are raised on bad input."""
    # dictionary required with u_mass when only topics are given
    self.assertRaises(ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
                      coherence='u_mass')
    # c_v requires texts, not a corpus
    self.assertRaises(ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
                      dictionary=self.dictionary, coherence='c_v')
    # u_mass requires a corpus
    self.assertRaises(ValueError, CoherenceModel, topics=self.topics1,
                      dictionary=self.dictionary, coherence='u_mass')
def test_saveAsText(self):
    """`Dictionary` can be saved as textfile."""
    tmpf = get_tmpfile('save_dict_test.txt')
    small_text = [['prv\xc3\xa9', 'slovo'], ['slovo', 'druh\xc3\xa9'], ['druh\xc3\xa9', 'slovo']]
    d = Dictionary(small_text)

    d.save_as_text(tmpf)
    with codecs.open(tmpf, 'r', encoding='utf-8') as file:
        serialized_lines = file.readlines()
        self.assertEqual(serialized_lines[0], u'3\n')
        self.assertEqual(len(serialized_lines), 4)
        # the rest of the lines are tab-separated: token id, word, document frequency
        self.assertEqual(serialized_lines[1][1:], u'\tdruh\xe9\t2\n')
        self.assertEqual(serialized_lines[2][1:], u'\tprv\xe9\t1\n')
        self.assertEqual(serialized_lines[3][1:], u'\tslovo\t3\n')

    d.save_as_text(tmpf, sort_by_word=False)
    with codecs.open(tmpf, 'r', encoding='utf-8') as file:
        serialized_lines = file.readlines()
        self.assertEqual(serialized_lines[0], u'3\n')
        self.assertEqual(len(serialized_lines), 4)
        self.assertEqual(serialized_lines[1][1:], u'\tslovo\t3\n')
        self.assertEqual(serialized_lines[2][1:], u'\tdruh\xe9\t2\n')
        self.assertEqual(serialized_lines[3][1:], u'\tprv\xe9\t1\n')
def test_loadFromText_legacy(self):
    """
    `Dictionary` can be loaded from textfile in legacy format.
    Legacy format does not have num_docs on the first line.
    """
    tmpf = get_tmpfile('load_dict_test_legacy.txt')
    no_num_docs_serialization = to_utf8('1\tprv\xc3\xa9\t1\n2\tslovo\t2\n')
    with open(tmpf, 'wb') as file:
        file.write(no_num_docs_serialization)
    d = Dictionary.load_from_text(tmpf)
    self.assertEqual(d.token2id[u'prv\xe9'], 1)
    self.assertEqual(d.token2id[u'slovo'], 2)
    self.assertEqual(d.dfs[1], 1)
    self.assertEqual(d.dfs[2], 2)
    self.assertEqual(d.num_docs, 0)

def test_loadFromText(self):
    """`Dictionary` can be loaded from textfile."""
    tmpf = get_tmpfile('load_dict_test.txt')
    no_num_docs_serialization = to_utf8('2\n1\tprv\xc3\xa9\t1\n2\tslovo\t2\n')
    with open(tmpf, 'wb') as file:
        file.write(no_num_docs_serialization)
    d = Dictionary.load_from_text(tmpf)
    self.assertEqual(d.token2id[u'prv\xe9'], 1)
    self.assertEqual(d.token2id[u'slovo'], 2)
    self.assertEqual(d.dfs[1], 1)
    self.assertEqual(d.dfs[2], 2)
    self.assertEqual(d.num_docs, 2)
def test_saveAsText_and_loadFromText(self):
    """`Dictionary` can be saved as textfile and loaded again from textfile."""
    tmpf = get_tmpfile('dict_test.txt')
    for sort_by_word in [True, False]:
        d = Dictionary(self.texts)
        d.save_as_text(tmpf, sort_by_word=sort_by_word)
        self.assertTrue(os.path.exists(tmpf))
        d_loaded = Dictionary.load_from_text(tmpf)
        self.assertNotEqual(d_loaded, None)
        self.assertEqual(d_loaded.token2id, d.token2id)

def test_from_corpus(self):
    """Build `Dictionary` from an existing corpus."""
    documents = [
        'Human machine interface for lab abc computer applications',
        'A survey of user opinion of computer system response time',
        'The EPS user interface management system',
        'System and human system engineering testing of EPS',
        'Relation of user perceived response time to error measurement',
        'The generation of random binary unordered trees',
        'The intersection graph of paths in trees',
        'Graph minors IV Widths of trees and well quasi ordering',
        'Graph minors A survey',
    ]
    stoplist = set('for a of the and to in'.split())
    texts = [[word for word in document.lower().split() if word not in stoplist]
             for document in documents]
    # remove words that appear only once
    all_tokens = sum(texts, [])
    tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
    texts = [[word for word in text if word not in tokens_once] for text in texts]

    dictionary = Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    dictionary_from_corpus = Dictionary.from_corpus(corpus)

    dict_token2id_vals = sorted(dictionary.token2id.values())
    dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
    self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
    self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
    self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
    self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
    self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)

    # passing the original id2word mapping must reproduce the dictionary exactly
    dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)
    self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
    self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
    self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
    self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
    self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)

    # make sure Sparse2Corpus input works as well
    bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
    dictionary = Dictionary.from_corpus(bow)
    self.assertEqual(dictionary.num_docs, 100)

def test_dict_interface(self):
    """Test Python 2 dict-like interface in both Python 2 and 3."""
    d = Dictionary(self.texts)
    self.assertTrue(isinstance(d, Mapping))
    self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))
    self.assertEqual(list(d.items()), list(d.iteritems()))
    self.assertEqual(list(d.keys()), list(d.iterkeys()))
    self.assertEqual(list(d.values()), list(d.itervalues()))
    if not PY3:
        self.assertTrue(isinstance(d.items(), list))
        self.assertTrue(isinstance(d.keys(), list))
        self.assertTrue(isinstance(d.values(), list))
def testSOnePre(self):
    """Test s_one_pre segmentation."""
    actual = segmentation.s_one_pre(self.topics)
    expected = [[(4, 9), (6, 9), (6, 4)],
                [(10, 9), (7, 9), (7, 10)],
                [(2, 5), (7, 5), (7, 2)]]
    self.assertTrue(np.allclose(actual, expected))

def testSOneOne(self):
    """Test s_one_one segmentation."""
    actual = segmentation.s_one_one(self.topics)
    expected = [[(9, 4), (9, 6), (4, 9), (4, 6), (6, 9), (6, 4)],
                [(9, 10), (9, 7), (10, 9), (10, 7), (7, 9), (7, 10)],
                [(5, 2), (5, 7), (2, 5), (2, 7), (7, 5), (7, 2)]]
    self.assertTrue(np.allclose(actual, expected))

def testSOneSet(self):
    """Test s_one_set segmentation."""
    actual = segmentation.s_one_set(self.topics)
    expected = [[(9, array([9, 4, 6])), (4, array([9, 4, 6])), (6, array([9, 4, 6]))],
                [(9, array([9, 10, 7])), (10, array([9, 10, 7])), (7, array([9, 10, 7]))],
                [(5, array([5, 2, 7])), (2, array([5, 2, 7])), (7, array([5, 2, 7]))]]
    for s_i in range(len(actual)):
        for j in range(len(actual[s_i])):
            self.assertEqual(actual[s_i][j][0], expected[s_i][j][0])
            self.assertTrue(np.allclose(actual[s_i][j][1], expected[s_i][j][1]))
def __init__(self):
    """
    The main part of the stemming algorithm starts here.

    b is a buffer holding a word to be stemmed. The letters are in
    b[0], b[1] ... ending at b[k]. k is readjusted downwards as the
    stemming progresses.

    Note that only lower case sequences are stemmed. Forcing to lower
    case should be done before stem(...) is called.
    """
    self.b = ''   # buffer for the word to be stemmed
    self.k = 0
    self.j = 0    # j is a general offset into the string

def _cons(self, i):
    """True <=> b[i] is a consonant."""
    ch = self.b[i]
    if ch in 'aeiou':
        return False
    if ch == 'y':
        # 'y' counts as a consonant only at the start of the word or after a vowel
        return i == 0 or not self._cons(i - 1)
    return True

def _m(self):
    """
    Returns the number of consonant sequences between 0 and j.

    If c is a consonant sequence and v a vowel sequence, and <..> indicates
    arbitrary presence:

        <c><v>       gives 0
        <c>vc<v>     gives 1
        <c>vcvc<v>   gives 2
        <c>vcvcvc<v> gives 3
    """
    i = 0
    while True:
        if i > self.j:
            return 0
        if not self._cons(i):
            break
        i += 1
    i += 1
    n = 0
    while True:
        while True:
            if i > self.j:
                return n
            if self._cons(i):
                break
            i += 1
        i += 1
        n += 1
        while True:
            if i > self.j:
                return n
            if not self._cons(i):
                break
            i += 1
        i += 1

def _vowelinstem(self):
    """True <=> 0,...j contains a vowel."""
    return not all(self._cons(i) for i in xrange(self.j + 1))
def _doublec(self, j):
    """True <=> j,(j-1) contain a double consonant."""
    return j > 0 and self.b[j] == self.b[j - 1] and self._cons(j)

def _cvc(self, i):
    """
    True <=> i-2,i-1,i has the form consonant - vowel - consonant
    and also if the second c is not w, x or y. This is used when trying to
    restore an e at the end of a short word, e.g.

        cav(e), lov(e), hop(e), crim(e), but snow, box, tray.
    """
    if i < 2 or not self._cons(i) or self._cons(i - 1) or not self._cons(i - 2):
        return False
    return self.b[i] not in 'wxy'

def _ends(self, s):
    """True <=> 0,...k ends with the string s."""
    if s[-1] != self.b[self.k]:
        return 0
    length = len(s)
    if length > (self.k + 1):
        return 0
    if self.b[self.k - length + 1:self.k + 1] != s:
        return 0
    self.j = self.k - length
    return 1

def _setto(self, s):
    """Set (j+1),...k to the characters in the string s, adjusting k."""
    self.b = self.b[:self.j + 1] + s
    self.k = len(self.b) - 1
def _step1ab(self):
    """
    Get rid of plurals and -ed or -ing. E.g.,

        caresses  ->  caress
        ponies    ->  poni
        ties      ->  ti
        caress    ->  caress
        cats      ->  cat

        feed      ->  feed
        agreed    ->  agree
        disabled  ->  disable

        matting   ->  mat
        mating    ->  mate
        meeting   ->  meet
        milling   ->  mill
        messing   ->  mess

        meetings  ->  meet
    """
    if self.b[self.k] == 's':
        if self._ends('sses'):
            self.k -= 2
        elif self._ends('ies'):
            self._setto('i')
        elif self.b[self.k - 1] != 's':
            self.k -= 1
    if self._ends('eed'):
        if self._m() > 0:
            self.k -= 1
    elif (self._ends('ed') or self._ends('ing')) and self._vowelinstem():
        self.k = self.j
        if self._ends('at'):
            self._setto('ate')
        elif self._ends('bl'):
            self._setto('ble')
        elif self._ends('iz'):
            self._setto('ize')
        elif self._doublec(self.k):
            if self.b[self.k - 1] not in 'lsz':
                self.k -= 1
        elif self._m() == 1 and self._cvc(self.k):
            self._setto('e')

def _step1c(self):
    """Turn terminal y to i when there is another vowel in the stem."""
    if self._ends('y') and self._vowelinstem():
        self.b = self.b[:self.k] + 'i'
def _step2(self):
    """
    Map double suffices to single ones. So, -ization ( = -ize plus -ation)
    maps to -ize etc. Note that the string before the suffix must give
    _m() > 0.
    """
    ch = self.b[self.k - 1]
    if ch == 'a':
        if self._ends('ational'):
            self._r('ate')
        elif self._ends('tional'):
            self._r('tion')
    elif ch == 'c':
        if self._ends('enci'):
            self._r('ence')
        elif self._ends('anci'):
            self._r('ance')
    elif ch == 'e':
        if self._ends('izer'):
            self._r('ize')
    elif ch == 'l':
        if self._ends('bli'):
            self._r('ble')
        elif self._ends('alli'):
            self._r('al')
        elif self._ends('entli'):
            self._r('ent')
        elif self._ends('eli'):
            self._r('e')
        elif self._ends('ousli'):
            self._r('ous')
    elif ch == 'o':
        if self._ends('ization'):
            self._r('ize')
        elif self._ends('ation'):
            self._r('ate')
        elif self._ends('ator'):
            self._r('ate')
    elif ch == 's':
        if self._ends('alism'):
            self._r('al')
        elif self._ends('iveness'):
            self._r('ive')
        elif self._ends('fulness'):
            self._r('ful')
        elif self._ends('ousness'):
            self._r('ous')
    elif ch == 't':
        if self._ends('aliti'):
            self._r('al')
        elif self._ends('iviti'):
            self._r('ive')
        elif self._ends('biliti'):
            self._r('ble')
    elif ch == 'g':
        if self._ends('logi'):
            self._r('log')

def _step3(self):
    """Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
    ch = self.b[self.k]
    if ch == 'e':
        if self._ends('icate'):
            self._r('ic')
        elif self._ends('ative'):
            self._r('')
        elif self._ends('alize'):
            self._r('al')
    elif ch == 'i':
        if self._ends('iciti'):
            self._r('ic')
    elif ch == 'l':
        if self._ends('ical'):
            self._r('ic')
        elif self._ends('ful'):
            self._r('')
    elif ch == 's':
        if self._ends('ness'):
            self._r('')
def _step4(self):
    """_step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
    ch = self.b[self.k - 1]
    if ch == 'a':
        if not self._ends('al'):
            return
    elif ch == 'c':
        if not self._ends('ance') and not self._ends('ence'):
            return
    elif ch == 'e':
        if not self._ends('er'):
            return
    elif ch == 'i':
        if not self._ends('ic'):
            return
    elif ch == 'l':
        if not self._ends('able') and not self._ends('ible'):
            return
    elif ch == 'n':
        if self._ends('ant'):
            pass
        elif self._ends('ement'):
            pass
        elif self._ends('ment'):
            pass
        elif self._ends('ent'):
            pass
        else:
            return
    elif ch == 'o':
        if self._ends('ion') and self.b[self.j] in 'st':
            pass
        elif self._ends('ou'):
            pass
        else:
            return
    elif ch == 's':
        if not self._ends('ism'):
            return
    elif ch == 't':
        if not self._ends('ate') and not self._ends('iti'):
            return
    elif ch == 'u':
        if not self._ends('ous'):
            return
    elif ch == 'v':
        if not self._ends('ive'):
            return
    elif ch == 'z':
        if not self._ends('ize'):
            return
    else:
        return
    if self._m() > 1:
        self.k = self.j

def _step5(self):
    """Remove a final -e if _m() > 1, and change -ll to -l if m() > 1."""
    k = self.j = self.k
    if self.b[k] == 'e':
        a = self._m()
        if a > 1 or (a == 1 and not self._cvc(k - 1)):
            self.k -= 1
    if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
        self.k -= 1

def stem(self, w):
    """Stem the word w, return the stemmed form."""
    w = w.lower()
    k = len(w) - 1
    if k <= 1:
        return w  # words of length 1 or 2 are left unchanged
    self.b = w
    self.k = k
    self._step1ab()
    self._step1c()
    self._step2()
    self._step3()
    self._step4()
    self._step5()
    return self.b[:self.k + 1]
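A short usage sketch of the stemmer defined above; the class name `PorterStemmer` is an assumption from context, and the expected outputs follow the examples listed in the `_step1ab` docstring.

stemmer = PorterStemmer()  # hypothetical name for the class holding the methods above
for word in ['caresses', 'ponies', 'meetings', 'disabled']:
    print(word, '->', stemmer.stem(word))
# caresses -> caress, ponies -> poni, meetings -> meet, disabled -> disable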
def __init__(self, id2word=None, num_topics=300):
    """Sklearn wrapper for RP model. See gensim.models.RpModel for parameter details."""
    self.gensim_model = None
    self.id2word = id2word
    self.num_topics = num_topics

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.RpModel."""
    self.gensim_model = models.RpModel(corpus=X, id2word=self.id2word, num_topics=self.num_topics)
    return self

def transform(self, docs):
    """
    Take documents/corpus as input. Return RP representation of the input
    documents/corpus.

    The input `docs` can correspond to multiple documents like:
        [[(0, 1.0), (1, 1.0), (2, 1.0)],
         [(0, 1.0), (3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0)]]
    or a single document like:
        [(0, 1.0), (1, 1.0), (2, 1.0)]
    """
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single document was passed in, wrap it in a list
    check = lambda x: [x] if isinstance(x[0], tuple) else x
    docs = check(docs)
    X = [[] for _ in range(0, len(docs))]
    for k, v in enumerate(docs):
        transformed_doc = self.gensim_model[v]
        probs_docs = matutils.sparse2full(transformed_doc, self.num_topics)
        X[k] = probs_docs
    return np.reshape(np.array(X), (len(docs), self.num_topics))
def __init__(self, time_slice=None, id2word=None, alphas=0.01, num_topics=10, initialize='gensim',
             sstats=None, lda_model=None, obs_variance=0.5, chain_variance=0.005, passes=10,
             random_state=None, lda_inference_max_iter=25, em_min_iter=6, em_max_iter=20,
             chunksize=100):
    """Sklearn wrapper for LdaSeq model. See gensim.models.LdaSeqModel for parameter details."""
    self.gensim_model = None
    self.time_slice = time_slice
    self.id2word = id2word
    self.alphas = alphas
    self.num_topics = num_topics
    self.initialize = initialize
    self.sstats = sstats
    self.lda_model = lda_model
    self.obs_variance = obs_variance
    self.chain_variance = chain_variance
    self.passes = passes
    self.random_state = random_state
    self.lda_inference_max_iter = lda_inference_max_iter
    self.em_min_iter = em_min_iter
    self.em_max_iter = em_max_iter
    self.chunksize = chunksize

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.LdaSeqModel."""
    self.gensim_model = models.LdaSeqModel(
        corpus=X, time_slice=self.time_slice, id2word=self.id2word, alphas=self.alphas,
        num_topics=self.num_topics, initialize=self.initialize, sstats=self.sstats,
        lda_model=self.lda_model, obs_variance=self.obs_variance,
        chain_variance=self.chain_variance, passes=self.passes, random_state=self.random_state,
        lda_inference_max_iter=self.lda_inference_max_iter, em_min_iter=self.em_min_iter,
        em_max_iter=self.em_max_iter, chunksize=self.chunksize)
    return self

def transform(self, docs):
    """
    Return the topic proportions for the documents passed.

    The input `docs` should be in BOW format and can be a list of documents like:
        [[(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
    or a single document like:
        [(4, 1), (7, 1)]
    """
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single document was passed in, wrap it in a list
    check = lambda x: [x] if isinstance(x[0], tuple) else x
    docs = check(docs)
    X = [[] for _ in range(0, len(docs))]
    for k, v in enumerate(docs):
        transformed_doc = self.gensim_model[v]
        X[k] = transformed_doc
    return np.reshape(np.array(X), (len(docs), self.num_topics))
def __init__(self, num_topics=200, id2word=None, chunksize=20000, decay=1.0, onepass=True,
             power_iters=2, extra_samples=100):
    """Sklearn wrapper for LSI model. See gensim.models.LsiModel for parameter details."""
    self.gensim_model = None
    self.num_topics = num_topics
    self.id2word = id2word
    self.chunksize = chunksize
    self.decay = decay
    self.onepass = onepass
    self.extra_samples = extra_samples
    self.power_iters = power_iters

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.LsiModel."""
    if sparse.issparse(X):
        corpus = matutils.Sparse2Corpus(X)
    else:
        corpus = X
    self.gensim_model = models.LsiModel(
        corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
        decay=self.decay, onepass=self.onepass, power_iters=self.power_iters,
        extra_samples=self.extra_samples)
    return self

def transform(self, docs):
    """
    Takes a list of documents as input ('docs').
    Returns a matrix of topic distribution for the given document bow, where a_ij
    indicates (topic_i, topic_probability_j).

    The input `docs` should be in BOW format and can be a list of documents like:
        [[(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
    or a single document like:
        [(4, 1), (7, 1)]
    """
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single document was passed in, wrap it in a list
    check = lambda x: [x] if isinstance(x[0], tuple) else x
    docs = check(docs)
    X = [[] for i in range(0, len(docs))]
    for k, v in enumerate(docs):
        doc_topics = self.gensim_model[v]
        probs_docs = matutils.sparse2full(doc_topics, self.num_topics)
        X[k] = probs_docs
    return np.reshape(np.array(X), (len(docs), self.num_topics))

def partial_fit(self, X):
    """Train model over X."""
    if sparse.issparse(X):
        X = matutils.Sparse2Corpus(X)
    if self.gensim_model is None:
        self.gensim_model = models.LsiModel(
            num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
            decay=self.decay, onepass=self.onepass, power_iters=self.power_iters,
            extra_samples=self.extra_samples)
    self.gensim_model.add_documents(corpus=X)
    return self
def __init__(self, num_topics=100, id2word=None, chunksize=2000, passes=1, update_every=1,
             alpha='symmetric', eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50,
             gamma_threshold=0.001, minimum_probability=0.01, random_state=None,
             scorer='perplexity'):
    """
    Sklearn wrapper for LDA model. See gensim.models.LdaModel for parameter details.

    `scorer` specifies the metric used in the `score` function.
    See the `gensim.models.LdaModel` class for a description of the other parameters.
    """
    self.gensim_model = None
    self.num_topics = num_topics
    self.id2word = id2word
    self.chunksize = chunksize
    self.passes = passes
    self.update_every = update_every
    self.alpha = alpha
    self.eta = eta
    self.decay = decay
    self.offset = offset
    self.eval_every = eval_every
    self.iterations = iterations
    self.gamma_threshold = gamma_threshold
    self.minimum_probability = minimum_probability
    self.random_state = random_state
    self.scorer = scorer

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.LdaModel."""
    if sparse.issparse(X):
        corpus = matutils.Sparse2Corpus(X)
    else:
        corpus = X
    self.gensim_model = models.LdaModel(
        corpus=corpus, num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
        passes=self.passes, update_every=self.update_every, alpha=self.alpha, eta=self.eta,
        decay=self.decay, offset=self.offset, eval_every=self.eval_every,
        iterations=self.iterations, gamma_threshold=self.gamma_threshold,
        minimum_probability=self.minimum_probability, random_state=self.random_state)
    return self

def transform(self, docs):
    """
    Takes a list of documents as input ('docs').
    Returns a matrix of topic distribution for the given document bow, where a_ij
    indicates (topic_i, topic_probability_j).

    The input `docs` should be in BOW format and can be a list of documents like:
        [[(4, 1), (7, 1)], [(9, 1), (13, 1)], [(2, 1), (6, 1)]]
    or a single document like:
        [(4, 1), (7, 1)]
    """
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single document was passed in, wrap it in a list
    check = lambda x: [x] if isinstance(x[0], tuple) else x
    docs = check(docs)
    X = [[] for _ in range(0, len(docs))]
    for k, v in enumerate(docs):
        doc_topics = self.gensim_model[v]
        probs_docs = matutils.sparse2full(doc_topics, self.num_topics)
        X[k] = probs_docs
    return np.reshape(np.array(X), (len(docs), self.num_topics))

def partial_fit(self, X):
    """
    Train model over X.

    By default, 'online (single-pass)' mode is used for training the LDA model.
    Configure the `passes` and `update_every` params at init to choose the mode among:
        - online (single-pass): update_every != None and passes == 1
        - online (multi-pass):  update_every != None and passes > 1
        - batch:                update_every == None
    """
    if sparse.issparse(X):
        X = matutils.Sparse2Corpus(X)
    if self.gensim_model is None:
        self.gensim_model = models.LdaModel(
            num_topics=self.num_topics, id2word=self.id2word, chunksize=self.chunksize,
            passes=self.passes, update_every=self.update_every, alpha=self.alpha, eta=self.eta,
            decay=self.decay, offset=self.offset, eval_every=self.eval_every,
            iterations=self.iterations, gamma_threshold=self.gamma_threshold,
            minimum_probability=self.minimum_probability, random_state=self.random_state)
    self.gensim_model.update(corpus=X)
    return self

def score(self, X, y=None):
    """Compute score reflecting how well the model has fit for the input data."""
    if self.scorer == 'perplexity':
        corpus_words = sum(cnt for document in X for _, cnt in document)
        subsample_ratio = 1.0
        perwordbound = self.gensim_model.bound(X, subsample_ratio=subsample_ratio) \
            / (subsample_ratio * corpus_words)
        # negated perplexity, so that a higher score means a better fit (sklearn convention)
        return -1 * np.exp2(-perwordbound)
    elif self.scorer == 'u_mass':
        goodcm = models.CoherenceModel(model=self.gensim_model, corpus=X,
                                       coherence=self.scorer, topn=3)
        return goodcm.get_coherence()
    else:
        raise ValueError('Invalid value of `scorer` param supplied')
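A hedged usage sketch of the LDA wrapper above; the class name `LdaTransformer` and the toy corpus are assumptions for illustration only.

from gensim.corpora import Dictionary

texts = [['human', 'interface', 'computer'], ['graph', 'trees'], ['graph', 'minors', 'survey']]
id2word = Dictionary(texts)
corpus = [id2word.doc2bow(text) for text in texts]

lda = LdaTransformer(num_topics=2, id2word=id2word, iterations=10, random_state=0)  # hypothetical class name
doc_topics = lda.fit(corpus).transform(corpus)  # dense array of shape (3, 2)
print(lda.score(corpus))  # negated perplexity; values closer to zero indicate a better fit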
def __init__(self, num_topics=100, id2word=None, author2doc=None, doc2author=None, chunksize=2000,
             passes=1, iterations=50, decay=0.5, offset=1.0, alpha='symmetric', eta='symmetric',
             update_every=1, eval_every=10, gamma_threshold=0.001, serialized=False,
             serialization_path=None, minimum_probability=0.01, random_state=None):
    """Sklearn wrapper for AuthorTopic model. See gensim.models.AuthorTopicModel for parameter details."""
    self.gensim_model = None
    self.num_topics = num_topics
    self.id2word = id2word
    self.author2doc = author2doc
    self.doc2author = doc2author
    self.chunksize = chunksize
    self.passes = passes
    self.iterations = iterations
    self.decay = decay
    self.offset = offset
    self.alpha = alpha
    self.eta = eta
    self.update_every = update_every
    self.eval_every = eval_every
    self.gamma_threshold = gamma_threshold
    self.serialized = serialized
    self.serialization_path = serialization_path
    self.minimum_probability = minimum_probability
    self.random_state = random_state

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.AuthorTopicModel."""
    self.gensim_model = models.AuthorTopicModel(
        corpus=X, num_topics=self.num_topics, id2word=self.id2word, author2doc=self.author2doc,
        doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
        iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha,
        eta=self.eta, update_every=self.update_every, eval_every=self.eval_every,
        gamma_threshold=self.gamma_threshold, serialized=self.serialized,
        serialization_path=self.serialization_path, minimum_probability=self.minimum_probability,
        random_state=self.random_state)
    return self

def transform(self, author_names):
    """Return the topic distribution for the input authors, one dense vector of `num_topics` probabilities per author."""
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single author name was passed in, wrap it in a list
    check = lambda x: [x] if not isinstance(x, list) else x
    author_names = check(author_names)
    X = [[] for _ in range(0, len(author_names))]
    for k, v in enumerate(author_names):
        transformed_author = self.gensim_model[v]
        probs_author = matutils.sparse2full(transformed_author, self.num_topics)
        X[k] = probs_author
    return np.reshape(np.array(X), (len(author_names), self.num_topics))

def partial_fit(self, X, author2doc=None, doc2author=None):
    """Train model over X."""
    if self.gensim_model is None:
        self.gensim_model = models.AuthorTopicModel(
            corpus=X, num_topics=self.num_topics, id2word=self.id2word, author2doc=self.author2doc,
            doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
            iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha,
            eta=self.eta, update_every=self.update_every, eval_every=self.eval_every,
            gamma_threshold=self.gamma_threshold, serialized=self.serialized,
            serialization_path=self.serialization_path,
            minimum_probability=self.minimum_probability, random_state=self.random_state)
    self.gensim_model.update(corpus=X, author2doc=author2doc, doc2author=doc2author)
    return self
def __init__(self, size=100, alpha=0.025, window=5, min_count=5, max_vocab_size=None, sample=0.001,
             seed=1, workers=3, min_alpha=0.0001, sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash,
             iter=5, null_word=0, trim_rule=None, sorted_vocab=1, batch_words=10000):
    """Sklearn wrapper for Word2Vec model. See gensim.models.Word2Vec for parameter details."""
    self.gensim_model = None
    self.size = size
    self.alpha = alpha
    self.window = window
    self.min_count = min_count
    self.max_vocab_size = max_vocab_size
    self.sample = sample
    self.seed = seed
    self.workers = workers
    self.min_alpha = min_alpha
    self.sg = sg
    self.hs = hs
    self.negative = negative
    self.cbow_mean = int(cbow_mean)
    self.hashfxn = hashfxn
    self.iter = iter
    self.null_word = null_word
    self.trim_rule = trim_rule
    self.sorted_vocab = sorted_vocab
    self.batch_words = batch_words

def fit(self, X, y=None):
    """Fit the model according to the given training data. Calls gensim.models.Word2Vec."""
    self.gensim_model = models.Word2Vec(
        sentences=X, size=self.size, alpha=self.alpha, window=self.window,
        min_count=self.min_count, max_vocab_size=self.max_vocab_size, sample=self.sample,
        seed=self.seed, workers=self.workers, min_alpha=self.min_alpha, sg=self.sg, hs=self.hs,
        negative=self.negative, cbow_mean=self.cbow_mean, hashfxn=self.hashfxn, iter=self.iter,
        null_word=self.null_word, trim_rule=self.trim_rule, sorted_vocab=self.sorted_vocab,
        batch_words=self.batch_words)
    return self

def transform(self, words):
    """Return the word-vectors for the input list of words."""
    if self.gensim_model is None:
        raise NotFittedError(
            "This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
        )
    # if a single word was passed in, wrap it in a list
    check = lambda x: [x] if isinstance(x, six.string_types) else x
    words = check(words)
    X = [[] for _ in range(0, len(words))]
    for k, v in enumerate(words):
        word_vec = self.gensim_model[v]
        X[k] = word_vec
    return np.reshape(np.array(X), (len(words), self.size))
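A hedged usage sketch of the Word2Vec wrapper above; the class name `W2VTransformer` and the toy sentences are assumptions for illustration only.

texts = [
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey'],
]
w2v = W2VTransformer(size=10, min_count=1, seed=1)  # hypothetical class name
vectors = w2v.fit(texts).transform(['graph', 'survey'])  # dense array of shape (2, 10)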
def __init__(self, kernel_type='linear', C=1.0, gamma=5.0):
    """
    :param kernel_type: Kernel type to use in training.
        'linear' uses the linear kernel function,
        'quadratic' uses the quadratic kernel function,
        'gaussian' uses the Gaussian kernel function.
    :param C: Value of the regularization parameter C.
    :param gamma: Parameter for the Gaussian or polynomial kernel.
    """
    self.kernels = {
        'linear': self.kernel_linear,
        'quadratic': self.kernel_quadratic,
        'gaussian': self.kernel_gaussian,
    }
    self.kernel_type = kernel_type
    self.kernel = self.kernels[self.kernel_type]
    self.C = C
    self.gamma = gamma

def compute_kernel_matrix(self, X1, X2):
    """Compute the kernel (Gram) matrix given two input matrices."""
    n1 = X1.shape[0]
    n2 = X2.shape[0]
    K = np.zeros((n1, n2))
    for i in range(n1):
        for j in range(n2):
            K[i, j] = self.kernel(X1[i], X2[j])
    return K

def fit(self, X, y):
    """
    Train the kernel ridge regression model.

    :param X: training X
    :param y: training y
    :return: alpha vector, see document TODO
    """
    K = self.compute_kernel_matrix(X, X)
    # closed-form solution: alpha = (K + C*I)^-1 y
    self.alphas = sp.dot(inv(K + self.C * np.eye(np.shape(K)[0])), y.transpose())
    return self.alphas

def predict(self, x_train, x_test):
    """
    :param x_train: DxNtr array of Ntr train data points with D features
    :param x_test: DxNte array of Nte test data points with D features
    :return: y_test, D2xNte array
    """
    k = self.compute_kernel_matrix(x_test, x_train)
    y_test = sp.dot(k, self.alphas)
    return y_test.transpose()
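A minimal usage sketch of the kernel ridge regression class above; the class name `KernelRidgeRegression` and the random data are assumptions for illustration only, and the sketch assumes the kernel functions referenced in `__init__` are defined on the class.

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.randn(20, 3)                     # 20 samples with 3 features (rows are samples)
y_train = X_train[:, 0] + 0.1 * rng.randn(20)  # noisy linear target
X_test = rng.randn(5, 3)

krr = KernelRidgeRegression(kernel_type='gaussian', C=0.1, gamma=5.0)  # hypothetical class name
krr.fit(X_train, y_train)
y_pred = krr.predict(X_train, X_test)  # predict() needs the training points to build the kernel matrix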
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.

    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize weights

    :type input: theano.tensor.dtensor4
    :param input: symbolic image tensor, of shape image_shape

    :type filter_shape: tuple or list of length 4
    :param filter_shape: (number of filters, num input feature maps, filter height, filter width)

    :type image_shape: tuple or list of length 4
    :param image_shape: (batch size, num input feature maps, image height, image width)

    :type poolsize: tuple or list of length 2
    :param poolsize: the downsampling (pooling) factor (#rows, #cols)
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input

    # fan-in: num input feature maps * filter height * filter width
    fan_in = numpy.prod(filter_shape[1:])
    # fan-out: num output feature maps * filter height * filter width / pooling size
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:])) / numpy.prod(poolsize)

    # initialize weights uniformly in [-W_bound, W_bound]
    W_bound = numpy.sqrt(6.0 / (fan_in + fan_out))
    self.W = theano.shared(
        numpy.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                      dtype=theano.config.floatX),
        borrow=True)

    # one bias per output feature map
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W, filter_shape=filter_shape,
                           image_shape=image_shape)

    # downsample each feature map individually, using max-pooling
    pooled_out = downsample.max_pool_2d(input=conv_out, ds=poolsize, ignore_border=True)

    # add the bias, broadcast across the minibatch and spatial dimensions, then apply tanh
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

    # parameters of this layer
    self.params = [self.W, self.b]
def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh):
    """
    Typical hidden layer of a MLP: units are fully-connected and have a
    sigmoidal activation function. Weight matrix W is of shape (n_in, n_out)
    and the bias vector b is of shape (n_out,).

    NOTE: The nonlinearity used here is tanh.

    Hidden unit activation is given by: tanh(dot(input, W) + b)

    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize weights

    :type input: theano.tensor.dmatrix
    :param input: a symbolic tensor of shape (n_examples, n_in)

    :type n_in: int
    :param n_in: dimensionality of input

    :type n_out: int
    :param n_out: number of hidden units

    :type activation: theano.Op or function
    :param activation: non-linearity to be applied in the hidden layer
    """
    self.input = input
    if W is None:
        # initialize W uniformly in [-sqrt(6 / (n_in + n_out)), sqrt(6 / (n_in + n_out))]
        W_values = numpy.asarray(
            rng.uniform(low=-numpy.sqrt(6.0 / (n_in + n_out)),
                        high=numpy.sqrt(6.0 / (n_in + n_out)),
                        size=(n_in, n_out)),
            dtype=theano.config.floatX)
        if activation == theano.tensor.nnet.sigmoid:
            # a 4x larger interval is used for the sigmoid activation
            W_values *= 4
        W = theano.shared(value=W_values, name='W', borrow=True)
    if b is None:
        b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
        b = theano.shared(value=b_values, name='b', borrow=True)

    self.W = W
    self.b = b

    lin_output = T.dot(input, self.W) + self.b
    self.output = lin_output if activation is None else activation(lin_output)
    # parameters of the model
    self.params = [self.W, self.b]
def __init__(self, rng, input, n_in, n_hidden, n_out):
    """
    Initialize the parameters for the multilayer perceptron.

    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize weights

    :type input: theano.tensor.TensorType
    :param input: symbolic variable that describes the input of the architecture (one minibatch)

    :type n_in: int
    :param n_in: number of input units, the dimension of the space in which the datapoints lie

    :type n_hidden: int
    :param n_hidden: number of hidden units

    :type n_out: int
    :param n_out: number of output units, the dimension of the space in which the labels lie
    """
    # one tanh hidden layer followed by a logistic regression output layer
    self.hiddenLayer = HiddenLayer(rng=rng, input=input, n_in=n_in, n_out=n_hidden,
                                   activation=T.tanh)
    self.logRegressionLayer = LogisticRegression(input=self.hiddenLayer.output,
                                                 n_in=n_hidden, n_out=n_out)

    # L1 and squared L2 norms of the weights, usable as regularization terms
    self.L1 = abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()
    self.L2_sqr = (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()

    # the loss and error of the MLP are those of its output layer
    self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
    self.errors = self.logRegressionLayer.errors

    # parameters of both layers
    self.params = self.hiddenLayer.params + self.logRegressionLayer.params
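A hedged sketch of how the MLP above is typically wired up; the input sizes are illustrative assumptions, and the sketch assumes the `HiddenLayer` and `LogisticRegression` classes from this module plus a working Theano installation.

import numpy
import theano.tensor as T

rng = numpy.random.RandomState(1234)
x = T.matrix('x')    # minibatch of flattened input vectors
y = T.ivector('y')   # integer class labels

classifier = MLP(rng=rng, input=x, n_in=28 * 28, n_hidden=500, n_out=10)
# L1/L2-regularized loss, combining the pieces exposed by the class above
cost = classifier.negative_log_likelihood(y) + 0.0 * classifier.L1 + 0.0001 * classifier.L2_sqr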
def __init__(self, kernel=linear_kernel, C=None):
    """
    :param kernel: Kernel function; should be one of the kernel functions defined above.
    :param C: Value of the regularization parameter C.
    """
    self.kernel = kernel
    self.C = C
    if self.C is not None:
        self.C = float(self.C)

def __init__(self, max_iter=10000, kernel_type='linear', C=1.0, epsilon=0.001, sigma=5.0):
    """
    :param max_iter: Maximum number of iterations.
    :param kernel_type: Kernel type to use in training.
        'linear' uses the linear kernel function,
        'quadratic' uses the quadratic kernel function,
        'gaussian' uses the Gaussian kernel function.
    :param C: Value of the regularization parameter C.
    :param epsilon: Convergence value.
    :param sigma: Parameter for the Gaussian kernel.
    """
    self.kernels = {
        'linear': self.kernel_linear,
        'quadratic': self.kernel_quadratic,
        'gaussian': self.kernel_gaussian,
    }
    self.max_iter = max_iter
    self.kernel_type = kernel_type
    self.C = C
    self.epsilon = epsilon
    self.sigma = sigma
def _calcEntropy(self, y):
    """Parameter y: the labels of the data set. Returns the Shannon entropy of y."""
    num = y.shape[0]
    labelCounts = {}
    for label in y:
        if label not in labelCounts.keys():
            labelCounts[label] = 0
        labelCounts[label] += 1
    entropy = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / num
        entropy -= prob * np.log2(prob)
    return entropy

def _splitDataSet(self, X, y, index, value):
    """
    Return the subset of the data set in which the feature at column `index`
    equals `value`. The selected feature column is removed from the returned X.
    """
    ret = []
    featVec = X[:, index]
    X = X[:, [i for i in range(X.shape[1]) if i != index]]
    for i in range(len(featVec)):
        if featVec[i] == value:
            ret.append(i)
    return X[ret, :], y[ret]

def _chooseBestFeatureToSplit_ID3(self, X, y):
    """
    ID3: choose the best feature to split on.

    Variables:
        dataSet          -- the data set; its last column is the label
        numFeatures      -- number of features
        oldEntropy       -- entropy of the original data set
        newEntropy       -- entropy after splitting the data set on a given feature
        infoGain         -- information gain
        bestInfoGain     -- largest information gain seen so far
        bestFeatureIndex -- index of the splitting feature with the largest information gain
    """
    numFeatures = X.shape[1]
    oldEntropy = self._calcEntropy(y)
    bestInfoGain = 0.0
    bestFeatureIndex = -1
    for i in range(numFeatures):
        featList = X[:, i]
        uniqueVals = set(featList)
        newEntropy = 0.0
        # weighted entropy of the partition induced by feature i
        for value in uniqueVals:
            sub_X, sub_y = self._splitDataSet(X, y, i, value)
            prob = len(sub_y) / float(len(y))
            newEntropy += prob * self._calcEntropy(sub_y)
        infoGain = oldEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeatureIndex = i
    return bestFeatureIndex
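A small worked example of the ID3 criterion above; the wrapper class name `DecisionTree` and the toy data are assumptions for illustration only.

import numpy as np

X = np.array([[1, 0], [1, 1], [0, 0], [0, 1]])  # two binary features
y = np.array([1, 1, 0, 0])                      # label equals feature 0

tree = DecisionTree()  # hypothetical class holding the methods above
print(tree._calcEntropy(y))                      # 1.0: two classes, split 50/50
print(tree._chooseBestFeatureToSplit_ID3(X, y))  # 0: splitting on feature 0 gives information gain 1.0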