repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
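The rows below are a flattened preview of a source-code table with six columns: repo_name, path, copies, size, content (the full file text), and license. As a rough, hypothetical sketch only (the dump's actual file name and storage format are not given here, so both are assumptions), such a table could be loaded and inspected with pandas like this:

```python
import pandas as pd

# Hypothetical path/format for this dump; adjust to wherever the table is stored.
df = pd.read_parquet("code_files.parquet")

# Columns assumed from the header above.
expected = ["repo_name", "path", "copies", "size", "content", "license"]
assert list(df.columns) == expected

# Count files per license and preview the first file's metadata.
print(df["license"].value_counts())
first = df.iloc[0]
print(first["repo_name"], first["path"], len(first["content"]), "chars")
```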
kashif/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 59 | 35604 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.random import choice
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '\u0627' # simple alef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # alef with a hamza below
expected = '' # alef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to 'strict', so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing makes IDF fragile in the presence of features with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset all parameter combinations converge to 100% accuracy
# models, so the first (unigram) configuration tried by the grid search ends
# up being selected as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset all parameter combinations converge to 100% accuracy
# models, so the first (unigram) configuration tried by the grid search ends
# up being selected as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# Test that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(choice(vocab_words, size=5, replace=False,
random_state=rng))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = choice(vocab_words, size=5, replace=False, random_state=rng)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
scollis/high_resolution_hydrology | cluster/profile_mpi0/ipython_kernel_config.py | 4 | 15364 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._execute_sleep = 0.0005
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 2.7.10 |Anaconda 1.9.1 (64-bit)| (default, May 28 2015, 17:02:03) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://anaconda.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'scollis'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
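#------------------------------------------------------------------------------
# Illustrative example (not generated by IPython): a minimal sketch of how a
# few of the options above might be enabled for a kernel that accepts remote
# consoles. The concrete values below are assumptions, not recommendations;
# uncomment and adapt them only if they match your setup.
#------------------------------------------------------------------------------
# c.IPKernelApp.ip = '0.0.0.0'        # listen on all interfaces (be careful!)
# c.IPKernelApp.log_level = 20        # INFO-level logging
# c.Session.packer = 'json'           # keep the default JSON serializer
# c.Session.keyfile = '/path/to/key'  # hypothetical path to an HMAC key file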
| bsd-2-clause |
xwolf12/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
samuel1208/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Tjorriemorrie/trading | 14_rf_day_episodes/predict/predict.py | 1 | 17194 | import logging
from os.path import realpath, dirname
import pandas as pd
import numpy as np
import pickle
import argparse
from random import random, choice, shuffle
from pprint import pprint
from sklearn.preprocessing import scale
from time import time, sleep
import operator
import datetime
import calendar
CURRENCIES = [
'AUDUSD',
'EURGBP',
'EURJPY',
'EURUSD',
'GBPJPY',
'GBPUSD',
'NZDUSD',
'USDCAD',
'USDCHF',
'USDJPY',
]
INTERVALS = [
# '60',
'1440',
]
ACTIONS = [
'waiting',
'enter-long',
'stay-long',
'exit-long',
'enter-short',
'stay-short',
'exit-short',
'completed',
]
def main(debug):
interval = choice(INTERVALS)
for currency in CURRENCIES:
df = loadData(currency, interval)
df = dropOutliers(df)
df = setGlobalStats(df)
# print df
q = loadQ(currency, interval)
# only predict on last group
group_df = df.groupby(pd.TimeGrouper(freq='M')).get_group(df.groupby(pd.TimeGrouper(freq='M')).last().iloc[-1].name)
trail = predict(group_df, q)
logging.warn('{0} [{1}] on {4} [{2}] items => {3}'.format(currency, interval, len(group_df), trail, group_df.iloc[-1].name))
# results.append(r)
# if debug:
# break # df groups
#
# # results
# results_avg = np.mean(results)
# results_std = np.std(results)
# while len(results) > 1000:
# results.remove(min(results[:int(len(results)*results_avg)]))
#
# # adjust values
# inverse_val = 1. - max(results_avg, 0.001)
# lamda = results_avg
# alpha = inverse_val / 4.
# epsilon = alpha / 3.
#
# if time() - time_start > time_interval or debug:
# logging.warn('{7} [{0}] {1:.0f}-{6:.0f}-{5:.0f} % [e:{2:.2f}% a:{3:.1f}% l:{4:.0f}%]'.format(
# epoch,
# (results_avg - results_std) * 100,
# epsilon * 100,
# alpha * 100,
# lamda * 100,
# (results_avg + results_std) * 100,
# results_avg * 100,
# currency,
# ))
# saveQ(currency, interval, q)
# time_interval += seconds_info_intervals
#
# if (len(results) > 100 and time() - time_start >= seconds_to_run) or debug:
# # logging.error('{1} training finished at upper {0:.0f}%'.format((results_avg + results_std) * 100, currency))
# break
#
# saveQ(currency, interval, q)
#
# if debug:
# break # currencies
#
# if debug:
# break # forever
def loadData(currency, interval):
logging.info('Data: loading {0} at {1}...'.format(currency, interval))
df = pd.read_csv(
r'{0}/../../data/{1}{2}.csv'.format(realpath(dirname(__file__)), currency, interval),
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
parse_dates=[[0, 1]],
index_col=0,
).astype(float)
logging.info('Data: {0} rows loaded'.format(len(df)))
return df
def dropOutliers(df):
logging.info('Outliers: dropping...')
size_start = len(df)
# get range
df['range'] = df.high - df.low
# print df
# get stats
mean = df.range.mean()
std = df.range.std()
# drop outliers
min_cutoff = mean - std * 2
max_cutoff = mean + std * 2
    logging.debug('Dropping outliers below {0:.4f} and above {1:.4f}'.format(min_cutoff, max_cutoff))
df = df[df.range > min_cutoff]
df = df[df.range < max_cutoff]
    logging.debug('Dropped {0} rows'.format(size_start - len(df)))
logging.info('Outliers: {0} removed'.format(size_start - len(df)))
return df
def setGlobalStats(df):
logging.info('DF: adding non-state stats...')
# moving average
hlc = df.apply(lambda x: (x['high'] + x['low'] + x['close']) / 3, axis=1)
avg_5 = pd.rolling_mean(hlc, 5)
avg_5_y = avg_5.shift(+1)
df['ma_quick_bullish'] = avg_5 >= avg_5_y
avg_5_diff = abs(avg_5 - avg_5_y)
avg_5_diff_y = avg_5_diff.shift(+1)
df['ma_quick_divergence'] = avg_5_diff >= avg_5_diff_y
df['ma_quick_magnitude'] = avg_5_diff > avg_5_diff.mean()
avg_20 = pd.rolling_mean(hlc, 20)
avg_20_y = avg_20.shift(+1)
df['ma_signal_bullish'] = avg_20 >= avg_20_y
avg_20_diff = abs(avg_20 - avg_20_y)
avg_20_diff_y = avg_20_diff.shift(+1)
df['ma_signal_divergence'] = avg_20_diff >= avg_20_diff_y
df['ma_signal_magnitude'] = avg_20_diff > avg_20_diff.mean()
df['ma_crossover_bullish'] = avg_5 >= avg_20
ma_diff = avg_5 - avg_20
ma_diff_y = avg_5_y - avg_20_y
df['ma_crossover_divergence'] = ma_diff >= ma_diff_y
df['ma_crossover_magnitude'] = ma_diff >= ma_diff.mean()
# pivots
df_pivots = pd.DataFrame(dtype=float)
df_pivots['hlc'] = hlc
df_pivots['hlc_y1'] = hlc.shift(1)
df_pivots['hlc_y2'] = hlc.shift(2)
df_pivots['hlc_y3'] = hlc.shift(3)
df_pivots['hlc_y4'] = hlc.shift(4)
df['pivot_high_major'] = df_pivots.apply(lambda x: 1 if (x['hlc_y4'] < x['hlc_y3'] < x['hlc_y2'] and x['hlc_y2'] > x['hlc_y1'] > x['hlc']) else 0, axis=1)
df['pivot_high_minor'] = df_pivots.apply(lambda x: 1 if (x['hlc_y2'] < x['hlc_y1'] and x['hlc_y1'] > x['hlc']) else 0, axis=1)
df['pivot_low_major'] = df_pivots.apply(lambda x: 1 if (x['hlc_y4'] > x['hlc_y3'] > x['hlc_y2'] and x['hlc_y2'] < x['hlc_y1'] < x['hlc']) else 0, axis=1)
df['pivot_low_minor'] = df_pivots.apply(lambda x: 1 if (x['hlc_y2'] > x['hlc_y1'] and x['hlc_y1'] < x['hlc']) else 0, axis=1)
# situationals
df['higher_high'] = df_pivots.apply(lambda x: 1 if (x['hlc'] > x['hlc_y1']) else 0, axis=1)
df['lower_low'] = df_pivots.apply(lambda x: 1 if (x['hlc'] < x['hlc_y1']) else 0, axis=1)
df['higher_soldiers'] = df_pivots.apply(lambda x: 1 if (x['hlc'] > x['hlc_y1'] > x['hlc_y2']) else 0, axis=1)
df['lower_soldiers'] = df_pivots.apply(lambda x: 1 if (x['hlc'] < x['hlc_y1'] < x['hlc_y2']) else 0, axis=1)
# ATR
df_atr = pd.DataFrame(dtype=float)
df_atr['range'] = df['range']
df_atr['close_y'] = df['close'].shift(+1)
df_atr['h_from_c'] = abs(df['high'] - df_atr['close_y'])
df_atr['l_from_c'] = abs(df['low'] - df_atr['close_y'])
df_atr['tr'] = df_atr.apply(lambda x: max(x['range'], x['h_from_c'], x['l_from_c']), axis=1)
avg_5 = pd.rolling_mean(df_atr['tr'], 5)
avg_5_y = avg_5.shift(+1)
df['atr_quick_bullish'] = avg_5 >= avg_5_y
avg_5_diff = abs(avg_5 - avg_5_y)
avg_5_diff_y = avg_5_diff.shift(+1)
df['atr_quick_divergence'] = avg_5_diff >= avg_5_diff_y
df['atr_quick_magnitude'] = avg_5_diff > avg_5_diff.mean()
avg_20 = pd.rolling_mean(df_atr['tr'], 20)
avg_20_y = avg_20.shift(+1)
df['atr_signal_bullish'] = avg_20 >= avg_20_y
avg_20_diff = abs(avg_20 - avg_20_y)
avg_20_diff_y = avg_20_diff.shift(+1)
df['atr_signal_divergence'] = avg_20_diff >= avg_20_diff_y
df['atr_signal_magnitude'] = avg_20_diff > avg_20_diff.mean()
df['atr_crossover_bullish'] = avg_5 >= avg_20
atr_diff = avg_5 - avg_20
atr_diff_y = avg_5_y - avg_20_y
df['atr_crossover_divergence'] = atr_diff >= atr_diff_y
df['atr_crossover_magnitude'] = atr_diff >= atr_diff.mean()
# print df
# raise Exception('foo')
logging.info('DF: added non-state stats')
return df
def loadQ(currency, interval):
logging.info('Q: loading...')
try:
with open('{0}/models/{1}_{2}.q'.format(realpath(dirname(__file__)), currency, interval), 'rb') as f:
q = pickle.load(f)
except IOError:
q = {}
logging.info('Q: loaded {0}'.format(len(q)))
return q
def saveQ(currency, interval, q):
logging.info('Q: saving...')
with open('{0}/models/{1}_{2}.q'.format(realpath(dirname(__file__)), currency, interval), 'wb') as f:
pickle.dump(q, f)
logging.info('Q: saved {0}'.format(len(q)))
def predict(df, q):
logging.info('Predicting: started...')
# get bdays
first_bday = df.index[0]
first_day = first_bday.replace(day=1)
last_day = first_day.replace(day=calendar.monthrange(first_bday.year, first_bday.month)[1])
bdays_days = pd.bdate_range(start=first_bday, end=last_day)
bdays = len(bdays_days)
trail = ''
# initial state
i = 0.
s = getState(df, i, bdays)
# initial action
a = getAction(q, s, 0)
for date_time, row in df.iterrows():
logging.info(' ')
logging.info('Environment: {0}/{1} {2}'.format(i, len(df)-1, date_time))
logging.info('State: {0}'.format(sum(s)))
logging.info('Action: {0}'.format(a))
# take action (get trade status for s_next)
s_ts, trail = takeAction(s, a, trail)
# next environment
i_next = i + 1
if i_next >= len(df):
s_next = None
a_next = None
else:
s_next = getState(df, i_next, bdays, s_ts)
a_next = getAction(q, s_next, 0)
a = a_next
s = s_next
i += 1
# logging.warn('Trail {0}'.format(trail))
return trail
########################################################################################################
# WORLD
########################################################################################################
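# getState (below) encodes the environment as a 55-element binary vector:
#   [0:4]    trade status (waiting / long / short / completed)
#   [4:29]   progress through the month's business days (25 buckets)
#   [29:38]  moving-average features (quick / signal / crossover, each with
#            bullish / divergence / magnitude flags)
#   [38:42]  pivot highs and lows (major / minor)
#   [42:46]  situational features (higher high, lower low, soldiers)
#   [46:55]  ATR features (same 3x3 layout as the moving-average block)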
def getState(df, i, bdays, s_ts=None):
logging.info('State: from {0}...'.format(i))
s = []
# trade status
if not s_ts:
s_trade_status = [1, 0, 0, 0]
else:
s_trade_status = s_ts
s += s_trade_status
logging.debug('State: trade status {0}'.format(s_trade_status))
# group progress
s_group_progress = [1 if (i+1)/bdays > t/25. else 0 for t in xrange(0, 25)]
s += s_group_progress
logging.debug('State: group progress {0}'.format(s_group_progress))
# current row
row = df.iloc[i]
# print row
# trend 5/20
s_trend = []
s_trend.append(1 if row['ma_quick_bullish'] else 0)
s_trend.append(1 if row['ma_quick_divergence'] else 0)
s_trend.append(1 if row['ma_quick_magnitude'] else 0)
s_trend.append(1 if row['ma_signal_bullish'] else 0)
s_trend.append(1 if row['ma_signal_divergence'] else 0)
s_trend.append(1 if row['ma_signal_magnitude'] else 0)
s_trend.append(1 if row['ma_crossover_bullish'] else 0)
s_trend.append(1 if row['ma_crossover_divergence'] else 0)
s_trend.append(1 if row['ma_crossover_magnitude'] else 0)
s += s_trend
logging.debug('State: moving average {0}'.format(s_trend))
# peaks
s_peaks = [
row['pivot_high_major'],
row['pivot_high_minor'],
row['pivot_low_major'],
row['pivot_low_minor']
]
s += s_peaks
logging.debug('State: peaks {0}'.format(s_peaks))
# situationals
s_situationals = [
row['higher_high'],
row['lower_low'],
row['higher_soldiers'],
row['lower_soldiers']
]
s += s_situationals
logging.debug('State: situationals {0}'.format(s_situationals))
# ATR 5/20
s_atr = []
s_atr.append(1 if row['atr_quick_bullish'] else 0)
s_atr.append(1 if row['atr_quick_divergence'] else 0)
s_atr.append(1 if row['atr_quick_magnitude'] else 0)
s_atr.append(1 if row['atr_signal_bullish'] else 0)
s_atr.append(1 if row['atr_signal_divergence'] else 0)
s_atr.append(1 if row['atr_signal_magnitude'] else 0)
s_atr.append(1 if row['atr_crossover_bullish'] else 0)
s_atr.append(1 if row['atr_crossover_divergence'] else 0)
s_atr.append(1 if row['atr_crossover_magnitude'] else 0)
s += s_atr
logging.debug('State: average true range {0}'.format(s_atr))
logging.info('State: {0}/{1}'.format(sum(s), len(s)))
return s
def getActionsAvailable(trade_status):
logging.debug('Action: finding available for {0}...'.format(trade_status))
# validate trade status
if sum(trade_status) != 1:
raise Exception('Invalid trade status')
# looking
if trade_status[0]:
actions_available = ['waiting', 'enter-long', 'enter-short']
# buying
elif trade_status[1]:
actions_available = ['stay-long', 'exit-long']
# selling
elif trade_status[2]:
actions_available = ['stay-short', 'exit-short']
# finished
elif trade_status[3]:
actions_available = ['completed']
else:
raise Exception('Unknown state {0}'.format(trade_status))
logging.debug('Action: found {0} for {1}...'.format(actions_available, trade_status))
return actions_available
def takeAction(s, a, trail):
logging.info('Change: state {0} with action {1}...'.format(s, a))
# take action
if a in ['waiting']:
s_trade_status = [1, 0, 0, 0]
trail += '_'
elif a in ['enter-long', 'stay-long']:
s_trade_status = [0, 1, 0, 0]
trail += 'B'
elif a in ['enter-short', 'stay-short']:
s_trade_status = [0, 0, 1, 0]
trail += 'S'
elif a in ['exit-long', 'exit-short', 'completed']:
s_trade_status = [0, 0, 0, 1]
trail += '!'
else:
raise Exception('Unknown action [{0}] to take on state [{1}]'.format(a, s[:4]))
logging.info('Change: trail = {0}'.format(trail))
logging.info('Change: state is now {0}...'.format(s_trade_status))
return s_trade_status, trail
def getReward(trail, optimus):
logging.info('Reward: trail vs optimus')
optimus_len = len(optimus) + 0.
# precision
r_correct = sum(map(operator.eq, trail, optimus))
r_precision = r_correct / optimus_len
logging.debug('Reward: correct {0:.0f} => {1:.2f}'.format(r_correct, r_precision))
# length
# r_length_optimus = optimus.count('B' if 'B' in optimus else 'S')
# r_length_trail = trail.count('B' if 'B' in optimus else 'S')
# r_length = 1 - (abs(r_length_trail - r_length_optimus) / max(optimus_len - r_length_optimus, r_length_optimus))
# logging.debug('Reward: trade length {0:.0f} vs {1:.0f} => {2:.2f}'.format(r_length_trail, r_length_optimus, r_length))
#
# r = np.mean([r_precision, r_length])
r = r_precision
logging.info('Reward: {0:.2f}'.format(r))
return r
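# For illustration: with optimus '_BB!' and trail '_BS!' the element-wise
# comparison matches at 3 of the 4 positions, giving a reward of 0.75.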
########################################################################################################
# SARSA
########################################################################################################
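# A sketch of the SARSA(lambda) update that the functions below implement
# together (in the notation used in this file):
#   delta = r + gamma * Q(s', a') - Q(s, a)                     # getDelta
#   e(s, a) += 1                                                # updateQ: accumulate trace
#   Q(x, b) += alpha * delta * e(x, b)  for every traced pair   # updateQ: only when r is non-zero
#   e(x, b) *= gamma * lamda            (dropped once <= 0.01)  # updateQ: decay traces
# getAction is epsilon-greedy: explore a random available action with
# probability epsilon, otherwise exploit the action with the highest Q-value.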
def getAction(q, s, epsilon):
logging.info('Action: finding...')
actions_available = getActionsAvailable(s[:4])
# exploration
if random() < epsilon:
logging.debug('Action: explore (<{0:.2f})'.format(epsilon))
a = choice(actions_available)
# exploitation
else:
logging.debug('Action: exploit (>{0:.2f})'.format(epsilon))
q_max = None
for action in actions_available:
q_sa = q.get((tuple(s), action), random() * 10.)
logging.debug('Qsa action {0} is {1:.4f}'.format(action, q_sa))
if q_sa > q_max:
q_max = q_sa
a = action
logging.info('Action: found {0}'.format(a))
return a
def getDelta(q, s, a, r, s_next, a_next, gamma):
logging.info('Delta: calculating...')
q_sa = q.get((tuple(s), a), 0)
if not s_next or not a_next:
q_sa_next = r
else:
q_sa_next = q.get((tuple(s_next), a_next), r)
d = r + (gamma * q_sa_next) - q_sa
logging.debug('Delta: r [{0:.2f}] + (gamma [{1:.2f}] * Qs`a` [{2:.4f}]) - Qsa [{3:.4f}]'.format(r, gamma, q_sa_next, q_sa))
logging.info('Delta: {0:.4f}'.format(d))
return d
def updateQ(q, s, a, d, r, etraces, lamda, gamma, alpha):
logging.info('Q: updating learning at {0:.2f} with lambda {1:.2f}...'.format(alpha, lamda))
# update current s,a
sa = (tuple(s), a)
etraces[sa] = etraces.get(sa, 0.) + 1
# update for all etraces
etraces_updated = {}
for sa, e_sa in etraces.iteritems():
# q (only if there is a reward)
if r:
q_sa = q.get(sa, r)
# logging.debug('Q: Qsa before {0:.4f}'.format(q_sa))
# logging.debug('Q: d:{0:.2f} e:{1:.2f}'.format(d, e_sa))
q_sa_updated = q_sa + (alpha * d * e_sa)
q[sa] = q_sa_updated
logging.debug('Q: before {0:.4f} \t et {1:.2f} \t after {2:.4f}'.format(q_sa, e_sa, q_sa_updated))
# decay etrace
if e_sa > 0.01:
etraces_updated[sa] = e_sa * gamma * lamda
return q, etraces_updated
########################################################################################################
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args()
debug = args.debug
lvl = logging.DEBUG if debug else logging.WARN
logging.basicConfig(
level=lvl,
format='%(asctime)s %(name)-8s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:',
)
main(debug) | mit |
spatialaudio/sweep | log_sweep_kaiser_window_bandlimited_script5/log_sweep_kaiser_window_bandlimited_script5.py | 2 | 2156 | #!/usr/bin/env python3
"""The influence of windowing of log. bandlimited sweep signals when using a
Kaiser Window by fixing beta (=2) and fade_out (=0).
fstart = 100 Hz
fstop = 5000 Hz
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import ir_imitation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 100
fstop = 5000
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combinate system elements
system = measurement_chain.chained(dirac_system, noise)
# Lists
beta = 7
fade_in_list = np.arange(0, 1001, 1)
fade_out = 0
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_in):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_windowed_zeropadded,
system_response,
fs)
return ir
with open("log_sweep_kaiser_window_bandlimited_script5.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| mit |
djgagne/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
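The shared pattern implemented by these base classes can be sketched, for
illustration only (a simplified version that omits input validation,
parallelism, sample weighting and class weighting), as::
    import numpy as np
    from sklearn.tree import DecisionTreeRegressor
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = X[:, 0] + 0.1 * rng.randn(100)
    trees = []
    for seed in range(10):
        # bootstrap sample: n_samples indices drawn with replacement
        indices = np.random.RandomState(seed).randint(0, len(X), len(X))
        trees.append(DecisionTreeRegressor(random_state=seed)
                     .fit(X[indices], y[indices]))
    # the forest prediction is the average of the per-tree predictions
    y_hat = sum(tree.predict(X) for tree in trees) / len(trees)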
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
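        For the single-output case this reduces to
        ``self.classes_.take(np.argmax(self.predict_proba(X), axis=1), axis=0)``,
        which is exactly what the implementation below computes.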
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
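        For a single output this reduction is equivalent to
        ``np.mean([t.predict_proba(X) for t in self.estimators_], axis=0)``;
        below it is computed with a parallel map over the trees followed by an
        in-place sum and a final division.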
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
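    A minimal usage sketch (random data, for illustration only; assumes the
    class is imported from ``sklearn.ensemble``)::
        import numpy as np
        from sklearn.ensemble import RandomForestClassifier
        rng = np.random.RandomState(0)
        X = rng.rand(200, 5)
        y = (X[:, 0] + X[:, 1] > 1).astype(int)
        clf = RandomForestClassifier(n_estimators=50, oob_score=True,
                                     random_state=0)
        clf.fit(X, y)
        clf.predict(X[:3])           # predicted class labels
        clf.predict_proba(X[:3])     # per-class probabilities
        clf.oob_score_               # out-of-bag accuracy estimate
        clf.feature_importances_     # impurity-based importances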
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
    A random forest is a meta estimator that fits a number of decision tree
    regressors on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
        Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
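    A minimal usage sketch (random data, for illustration only; assumes the
    class is imported from ``sklearn.ensemble``)::
        import numpy as np
        from sklearn.ensemble import RandomForestRegressor
        rng = np.random.RandomState(0)
        X = rng.rand(200, 4)
        y = np.sin(2 * np.pi * X[:, 0]) + 0.1 * rng.randn(200)
        reg = RandomForestRegressor(n_estimators=100, oob_score=True,
                                    random_state=0)
        reg.fit(X, y)
        reg.predict(X[:3])
        reg.oob_score_          # out-of-bag R^2 estimate
        reg.oob_prediction_     # per-sample out-of-bag predictions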
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
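    A minimal usage sketch (random data, for illustration only; note that
    ``bootstrap=False`` by default, so it must be enabled explicitly if an
    out-of-bag estimate is wanted)::
        import numpy as np
        from sklearn.ensemble import ExtraTreesClassifier
        rng = np.random.RandomState(0)
        X = rng.rand(200, 5)
        y = (X[:, 0] + X[:, 1] > 1).astype(int)
        clf = ExtraTreesClassifier(n_estimators=50, bootstrap=True,
                                   oob_score=True, random_state=0)
        clf.fit(X, y)
        clf.predict(X[:3])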
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
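    Examples
    --------
    A minimal usage sketch (the dataset helper and the parameter values below
    are illustrative only)::

        from sklearn.datasets import make_regression
        from sklearn.ensemble import ExtraTreesRegressor

        X, y = make_regression(n_samples=100, n_features=4, random_state=0)
        reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
        reg = reg.fit(X, y)
        predicted = reg.predict(X[:5])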
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
        The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer
        than ``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
    estimators_ : list of ExtraTreeRegressor
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
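    Examples
    --------
    A small sketch of the transformation (shapes are indicative only; the
    exact number of output columns depends on the leaves actually grown)::

        import numpy as np
        from sklearn.ensemble import RandomTreesEmbedding

        X = np.random.RandomState(0).rand(100, 2)
        embedder = RandomTreesEmbedding(n_estimators=10, random_state=0)
        X_sparse = embedder.fit_transform(X)
        # X_sparse is a sparse matrix of shape (100, n_out), with one
        # column per leaf and one non-zero entry per tree and sample.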
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are actually unit tests checking that
        # we fail for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
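        # The targets are random on purpose: the base ExtraTreeRegressor is
        # built with max_features=1, so a single random split is drawn per
        # node and the tree structure does not depend on y; the dummy target
        # is only needed to satisfy the forest fitting API.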
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
ronggong/jingjuSingingPhraseMatching | phoneticSimilarity/scoreManip.py | 1 | 7588 | '''
* Copyright (C) 2017 Music Technology Group - Universitat Pompeu Fabra
*
* This file is part of jingjuSingingPhraseMatching
*
* pypYIN is free software: you can redistribute it and/or modify it under
* the terms of the GNU Affero General Public License as published by the Free
* Software Foundation (FSF), either version 3 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the Affero GNU General Public License
* version 3 along with this program. If not, see http://www.gnu.org/licenses/
*
* If you have any problems with this Python code, please contact: Rong Gong
* [email protected]
*
*
* If you want to refer to this code, please use this article:
*
'''
from general.pinyinMap import *
from general.phonemeMap import *
from general.parameters import list_N_frames
from general.utilsFunctions import hz2cents
from general.filePath import class_name
from tailsMFCCTrain import loadMFCCTrain
from targetAudioProcessing import gmmModelLoad
import matplotlib.pyplot as plt
import numpy as np
import pinyin
import json
import pickle
from sklearn import preprocessing
def scoreMSynthesize(dict_score_info,N_frame):
'''
    synthesize the score matrix (one row per final, one column per frame)
    :param dict_score_info: score information of one phrase
    :param N_frame: number of frames of the synthesized matrix
    :return: binary matrix of shape (len(finals), N_frame)
'''
syl_finals,syl_durations,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
scoreM = np.zeros((len(finals),N_frame))
counter_frame=0
for ii in range(len(syl_finals)-1):
index_final = finals.index(syl_finals[ii])
        scoreM[index_final, counter_frame:counter_frame+int(syl_durations[ii])] = 1.0
        counter_frame += int(syl_durations[ii])
index_final = finals.index(syl_finals[-1])
scoreM[index_final,counter_frame:] = 1
return scoreM
def scoreMSynthesizePho(dict_score_info,N_frame):
'''
    synthesize the score matrix at the phoneme level (one row per phoneme)
    :param dict_score_info: score information of one phrase
    :param N_frame: number of frames of the synthesized matrix
    :return: binary matrix of shape (len(dic_pho_map.values()), N_frame)
'''
syl_finals,_,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
list_pho = []
for sf in syl_finals:
pho_final = dic_final_2_sampa[sf]
for pho in pho_final:
pho_map = dic_pho_map[pho]
if pho_map == u'H':
pho_map = u'y'
elif pho_map == u'9':
pho_map = u'S'
elif pho_map == u'yn':
pho_map = u'in'
list_pho.append(pho_map)
pho_dict = dic_pho_map.values()
pho_durations = (np.array([1.0]*len(list_pho))/len(list_pho))*N_frame
scoreM = np.zeros((len(pho_dict),N_frame))
counter_frame=0
for ii in range(len(list_pho)-1):
index_pho = pho_dict.index(list_pho[ii])
        scoreM[index_pho, counter_frame:counter_frame+int(pho_durations[ii])] = 1.0
        counter_frame += int(pho_durations[ii])
    index_pho = pho_dict.index(list_pho[-1])
    scoreM[index_pho, counter_frame:] = 1.0
return scoreM
def mfccSynthesizeFromGMM(dict_score_info,dim_mfcc,N_frame):
'''
    sample the MFCC array from the per-final GMM models
    :param dict_score_info: score information of one phrase
    :param dim_mfcc: dimensionality of the MFCC vectors
    :param N_frame: number of frames to synthesize
    :return: array of shape (N_frame, dim_mfcc)
'''
gmmModels = gmmModelLoad()
syl_finals,syl_durations,_ = retrieveSylInfo(dict_score_info)
# print syl_finals,syl_durations
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
mfcc_synthesized = np.zeros((N_frame,dim_mfcc))
counter_frame = 0
for ii in range(len(syl_finals)-1):
final_ii = syl_finals[ii]
dur = int(float(syl_durations[ii]))
if final_ii == 'v':
final_ii = 'u'
elif final_ii == 've':
final_ii = 'ue'
X,y = gmmModels[final_ii].sample(n_samples=dur)
mfcc_synthesized[counter_frame:counter_frame+dur,:] = X
counter_frame += dur
X,y = gmmModels[syl_finals[-1]].sample(n_samples=mfcc_synthesized.shape[0]-counter_frame)
mfcc_synthesized[counter_frame:,:] = X
return mfcc_synthesized
def mfccSynthesizeFromData(dict_score_info,dic_syllable_feature,N_frame):
'''
synthesize mfcc feature matrix for a singing candidate phrase
:param dict_score_info:
:param N_frame:
:return:
'''
syl_finals,syl_durations,syl_cents = retrieveSylInfo(dict_score_info)
syl_durations = (np.array(syl_durations)/np.sum(syl_durations))*N_frame
mfcc_synthesized = np.array([])
for ii in range(len(syl_finals)):
final_ii = syl_finals[ii]
if final_ii == 'v':
final_ii = 'u'
elif final_ii == 've':
final_ii = 'ue'
list_final_ii = dic_syllable_feature[final_ii]
if len(list_final_ii):
list_dur = np.array([dic_final_element_ii['N_frame'] for dic_final_element_ii in list_final_ii])
index_final_chosen = np.argmin((np.abs(list_dur-syl_durations[ii])))
mfcc = list_final_ii[index_final_chosen]['mfcc']
if not len(mfcc_synthesized):
mfcc_synthesized = mfcc
else:
mfcc_synthesized = np.vstack((mfcc_synthesized,mfcc))
else:
            print('no candidate final found for final %s' % final_ii)
# mfcc_synthesized = preprocessing.StandardScaler().fit_transform(mfcc_synthesized)
return mfcc_synthesized
def retrieveSylInfo(dict_score_info):
'''
collect pinyin and pinyin durations from score
:param dict_score_info:
:return:
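    Example of the expected note structure (hypothetical values; real entries
    in scores.json may carry more fields per note)::

        dict_score_info = {'notes': [
            {'lyric': u'\u6211', 'quarterLength': 1.0, 'freq': 440.0},
            {'lyric': '', 'quarterLength': 0.5, 'freq': 440.0}]}
        # the duration of the lyric-less note is merged into the previous syllable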
'''
syl_finals = []
syl_durations =[]
syl_cents = []
for dict_note in dict_score_info['notes']:
lyric = dict_note['lyric']
dur = dict_note['quarterLength']
cents = hz2cents(dict_note['freq'])
# print lyric
# print dict_note['quarterLength']
if lyric and dur:
py = pinyin.get(lyric, format="strip", delimiter=" ")
py_split = py.split()
if len(py_split) > 1:
if py_split[0] in non_pinyin:
py = py_split[1]
else:
py = py_split[0]
# print py
final = dic_pinyin_2_initial_final_map[py]['final']
syl_finals.append(final)
syl_durations.append(dur)
syl_cents.append(cents)
elif len(syl_durations):
syl_durations[-1] += dur
return syl_finals,syl_durations,syl_cents
def plotMFCC(mfcc):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.pcolormesh(mfcc)
plt.show()
def processSyllableMFCCTemplates(N_frame):
'''
generate mfcc synthesized templates according to N_frame
N_frame: template syllable frame length
:return:
'''
dic_mfcc_synthesized = {}
for key in dict_score:
print key
mfcc_synthesized = mfccSynthesizeFromData(dict_score[key],dic_syllable_feature_train,N_frame)
dic_mfcc_synthesized[key] = mfcc_synthesized
output = open('syllable_mfcc_templates/dic_mfcc_synthesized_'+str(N_frame)+'.pkl', 'wb')
pickle.dump(dic_mfcc_synthesized, output)
output.close()
if __name__ == '__main__':
dict_score = json.load(open('../melodicSimilarity/scores.json'))
dic_syllable_feature_train = loadMFCCTrain('dic_syllable_feature_train_'+class_name+'.pkl')
for N_frame in list_N_frames:
processSyllableMFCCTemplates(N_frame)
| agpl-3.0 |
happykhan/mlst_comp | src/main.py | 1 | 16402 | #!/usr/bin/env python
# Copyright (C) 2016. Nabil-Fareed Alikhan
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
I don't really know, but it compares MLST Methods
I still don't really know, but it compares MLST Methods
### CHANGE LOG ###
2016-11-22 <Nabil-Fareed Alikhan> <[email protected]>
* Initial build
"""
import sys, os, traceback, argparse
import time
import __init__ as meta
from docker import Client
import logging
import urllib2
import re
from subprocess import Popen, PIPE
import shutil
import gzip
from os.path import expanduser
epi = "Licence: "+ meta.__licence__ + " by " +meta.__author__ + " <" +meta.__author_email__ + ">"
def run_method(args):
data_dict = {}
method = args.mlst_method
output_file = args.output
# Open dataset file
if not output_file:
output_file = '%s.updated.tsv' %args.dataset
with open(args.dataset) as data_f:
headers = data_f.readline().strip().split('\t')
# For each record,
for record in data_f.readlines():
vals = []
for val in record.split('\t'):
vals.append(val.strip())
record_dict = dict(zip(headers, vals))
data_dict[record_dict['SRR_Acc_code']] = record_dict
for idx, record_name in enumerate(data_dict):
record_dict = data_dict[record_name]
# Check if ST if already called:
method_list = [method]
if method == 'all':
method_list = ['stringMLST', 'MOST', 'Ariba']
existing = [method_name for method_name in method_list if record_dict.get('%s_ST' %method_name)]
if len(existing) != len(method_list) or args.clean:
# Fetch reads
for read_pair in ['1', '2']:
if not record_dict.get('Download_loc_%s' %read_pair):
acc = record_dict.get('SRR_Acc_code')
if len(acc) > 9:
pos = 9 - len(acc)
                        acc_fill = acc[pos:].zfill(3)
record_dict['Download_loc_%s' %read_pair] = 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/%s/%s/%s/%s_%s.fastq.gz' %(acc[0:6],
acc_fill,
acc,
acc,
read_pair)
else:
record_dict['Download_loc_%s' %read_pair] = 'ftp://ftp.sra.ebi.ac.uk/vol1/fastq/%s/%s/%s_%s.fastq.gz' %(acc[0:6],
acc,
acc,
read_pair)
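                    # For example, a hypothetical 9-character accession such
                    # as 'ERR024070' maps to
                    # ftp://ftp.sra.ebi.ac.uk/vol1/fastq/ERR024/ERR024070/ERR024070_1.fastq.gz,
                    # while longer accessions get the extra zero-padded
                    # sub-directory built above from their trailing digits.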
read_loc_1 = _get_reads(record_dict['Download_loc_1'], clean = True)
read_loc_2 = _get_reads(record_dict['Download_loc_2'])
if not read_loc_1 or not read_loc_2:
logger.error(' Could not fetch reads from %s ' %record_dict['Download_loc_1'])
continue
# Run method, return ST and running time
if method == 'stringMLST' or method == 'all':
ST = None
runtime = None
if not record_dict.get('stringMLST_ST') or args.clean:
ST, runtime = run_string_mlst(read_loc_1, read_loc_2)
if ST:
record_dict['stringMLST_ST'] = ST
if runtime:
record_dict['stringMLST_runtime'] = runtime
if method == 'MOST' or method == 'all':
ST = None
runtime = None
if not record_dict.get('MOST_ST') or args.clean:
ST, runtime = run_most(read_loc_1, read_loc_2)
if ST:
record_dict['MOST_ST'] = ST
if runtime:
record_dict['MOST_runtime'] = runtime
if method == 'Ariba' or method == 'all':
ST = None
runtime = None
if not record_dict.get('Ariba_ST') or args.clean:
ST, runtime = run_ariba(read_loc_1, read_loc_2)
if ST:
record_dict['Ariba_ST'] = ST
if runtime:
record_dict['Ariba_runtime'] = runtime
data_dict[record_dict['SRR_Acc_code']] = record_dict
_write_output(output_file, data_dict)
if idx % 5 == 0 or idx == (len(data_dict)-1):
_draw_graph(output_file, args.html)
def _draw_graph(data_file, html_dir):
from bokeh.charts import BoxPlot, output_file, save
from pandas import read_csv, DataFrame
df = read_csv(data_file, sep='\t')
df = df.set_index('SRR_Acc_code')
labels = [str(col) for col in df.columns if col.endswith('_runtime')]
df_perf = DataFrame()
for label in labels:
x = DataFrame(zip(df[label], [label.split('_')[0]] * len(df[label])), columns= ['Seconds', 'MLST Caller'])
df_perf = df_perf.append(x)
output_name = os.path.splitext(os.path.basename(data_file))[0]
output_dir = os.path.join(html_dir, output_name)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
p = BoxPlot(df_perf, values='Seconds',label='MLST Caller', title="Runtime for MLST callers")
output_file(os.path.join(output_dir, '%s-runtime.html' %output_name))
save(p)
def _write_output(output_file, data_list):
# Write new dataset file including new ST calling
first = True
with open(output_file, 'w') as output_f:
for record_name in data_list:
record = data_list[record_name]
if first:
header_list = sorted(record.keys())
output_f.write('%s\n' %('\t'.join(header_list)))
first = False
row_list = []
for header in header_list:
row_list.append(record.get(header,''))
output_f.write('%s\n' %'\t'.join(row_list))
def run_string_mlst(read_file_1, read_file_2):
'''Runs stringMLST on given read set,
returns ST and runtime as a string
'''
cli = Client(base_url='tcp://127.0.0.1:2375')
logger.debug('Building StringMLST container...')
url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/stringMLST/Dockerfile'
response = urllib2.urlopen(urllib2.Request(url))
docker_file_path = os.path.abspath(os.path.join('../temp', 'StringMLST-Dockerfile'))
with open(docker_file_path, 'w') as docker_f:
docker_f.write(response.read())
response = [line for line in cli.build(fileobj=open(docker_file_path))]
build_hash = None
for line in response:
regex = re.search('Successfully built (\w+)', line )
if regex:
build_hash = regex.group(1)
logger.debug('Built StringMLST container successfully : %s' %build_hash)
if build_hash:
logger.debug('Running StringMLST on %s' %os.path.basename(read_file_1))
p = Popen(['sudo',
'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1),
build_hash, 'stringMLST.py', '--predict', '-1', '/reads/%s' %os.path.basename(read_file_1),
'-2', '/reads/%s' %os.path.basename(read_file_2), '-p', '-k', '35',
'-P','/stringMLST/SE'],
stdout=PIPE)
start_time = time.time()
out = p.stdout.read()
runtime = time.time() - start_time
ST = out.strip().split('\t')[-1]
logger.debug('%s: ST %s in %s seconds ' %(os.path.basename(read_file_1),ST,runtime))
return str(ST), str(round(runtime,3))
return None, None
def run_most(read_file_1, read_file_2):
'''Runs MOST on given read set,
returns ST and runtime as a string
'''
cli = Client(base_url='tcp://127.0.0.1:2375')
logger.debug('Building MOST container...')
url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/MOST/Dockerfile'
response = urllib2.urlopen(urllib2.Request(url))
docker_file_path = os.path.abspath(os.path.join('../temp', 'MOST-Dockerfile'))
with open(docker_file_path, 'w') as docker_f:
docker_f.write(response.read())
response = [line for line in cli.build(fileobj=open(docker_file_path))]
build_hash = None
for line in response:
regex = re.search('Successfully built (\w+)', line )
if regex:
build_hash = regex.group(1)
logger.debug('Built MOST container successfully : %s' %build_hash)
if build_hash:
logger.debug('Running MOST on %s' %os.path.basename(read_file_1))
if os.path.exists('../temp/MOST-temp'):
Popen(['sudo', 'rm', '-rf', '../temp/MOST-temp']).wait()
p = Popen(['sudo',
'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1),
build_hash, 'MOST.py', '-1', '/reads/%s' %os.path.basename(read_file_1),
'-2', '/reads/%s' %os.path.basename(read_file_2),
'-o', '/reads/MOST-temp',
'-st','/MOST/MLST_data/salmonella'],
stdout=PIPE)
start_time = time.time()
out = p.stdout.read()
runtime = time.time() - start_time
ST = None
MOST_output = '../temp/MOST-temp/%s_MLST_result.csv' %os.path.basename(read_file_1).split('.gz')[0]
if not os.path.exists(MOST_output):
return None, str(round(runtime,3))
with open(MOST_output) as most_f:
for line in most_f.readlines():
regex = re.search('st value:[,*]*([0-9]+)', line)
if regex:
ST = regex.group(1)
logger.debug('%s: ST %s in %s seconds '
%(os.path.basename(read_file_1),ST,runtime))
return str(ST), str(round(runtime,3))
return None, None
def run_ariba(read_file_1, read_file_2):
'''Runs Ariba on given read set,
returns ST and runtime as a string
'''
cli = Client(base_url='tcp://127.0.0.1:2375')
logger.debug('Building Ariba container...')
url = 'https://raw.githubusercontent.com/andrewjpage/docker_mlst/master/ariba/Dockerfile'
response = urllib2.urlopen(urllib2.Request(url))
docker_file_path = os.path.abspath(os.path.join('../temp', 'ariba-Dockerfile'))
with open(docker_file_path, 'w') as docker_f:
docker_f.write(response.read())
response = [line for line in cli.build(fileobj=open(docker_file_path))]
build_hash = None
for line in response:
regex = re.search('Successfully built (\w+)', line )
if regex:
build_hash = regex.group(1)
logger.debug('Built Ariba container successfully : %s' %build_hash)
if build_hash:
logger.debug('Running Ariba on %s' %os.path.basename(read_file_1))
if os.path.exists('../temp/ariba-temp'):
Popen(['sudo', 'rm', '-rf', '../temp/ariba-temp']).wait()
p = Popen(['sudo',
'docker', 'run', '--rm', '-v', '%s:/reads' %os.path.dirname(read_file_1), \
build_hash, 'ariba', 'run', '/salmonella_db/ref_db', \
'/reads/%s' %os.path.basename(read_file_1), \
'/reads/%s' %os.path.basename(read_file_2),\
'/reads/ariba-temp'],\
stdout=PIPE)
start_time = time.time()
out = p.stdout.read()
runtime = time.time() - start_time
allele_prof = {}
with open('../temp/ariba-temp/report.tsv') as ariba_f:
for line in ariba_f.readlines()[1:]:
out = line.split('\t')[0].split('.')
allele_prof[out[0]] = out[1]
with gzip.open('../datasets/Salmonella.UoW.profiles.list.gz', 'rb') as st_f:
headers = st_f.readline().strip().split('\t')
ST = None
for line in st_f.readlines():
st_prof = dict(zip(headers, line.strip().split('\t')))
match = 0
for locus in allele_prof.keys():
if allele_prof[locus] == st_prof[locus]:
match += 1
if match == 7:
ST = st_prof['ST']
break
logger.debug('%s: ST %s in %s seconds '
%(os.path.basename(read_file_1),ST,runtime))
return str(ST), str(round(runtime,3))
return None, None
def _get_reads(url, clean=False):
if not os.path.exists('../temp'):
os.mkdir('../temp')
read_path = os.path.join('../temp',url.split('/')[-1])
if not os.path.exists(read_path):
if clean:
num_files = len([name for name in os.listdir('../temp') if name.endswith('.gz')])
if num_files > 21:
logger.debug('Cleaning temp dir...')
for name in os.listdir('../temp'):
if name.endswith('.gz'):
os.remove(os.path.join('../temp', name))
logger.debug('Downloading read file %s...' %os.path.basename(read_path))
with open(read_path, 'wb') as read_f:
try:
response = urllib2.urlopen(urllib2.Request(url))
read_f.write(response.read())
except Exception :
return None
return os.path.abspath(read_path)
if __name__ == '__main__':
try:
start_time = time.time()
desc = __doc__.split('\n\n')[1].strip()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description=desc,epilog=epi)
parser.add_argument ('-v', '--verbose', action='store_true', default=False, help='verbose output')
parser.add_argument('--version', action='version', version='%(prog)s ' + meta.__version__)
parser.add_argument('-o','--output',action='store',help='output prefix')
subparsers = parser.add_subparsers(help='commands')
run_parser = subparsers.add_parser('run', help='Run MLST software over given dataset with Docker')
run_parser.add_argument('dataset', action='store', help='File location of dataset')
run_parser.add_argument('mlst_method', action='store', help='in silico typing MLST method', choices=['stringMLST', 'MOST', 'none', 'Ariba', 'all'])
run_parser.add_argument('-o','--output', action='store', help='Output file location, Default: <dataset>.updated.tsv', default=None)
run_parser.add_argument('-c', '--clean', action='store_true', help='Redo completed typing results', default=False)
run_parser.add_argument('-t', '--html', action='store', help='HTML output folder', default=os.path.join(expanduser('~'), 'public_html'))
run_parser.set_defaults(func=run_method)
args = parser.parse_args()
if args.verbose:
print "Executing @ " + time.asctime()
logger.setLevel(10)
args.func(args)
if args.verbose:
print "Ended @ " + time.asctime()
if args.verbose:
print 'total time in minutes:',
if args.verbose:
print (time.time() - start_time) / 60.0
sys.exit(0)
except KeyboardInterrupt, e: # Ctrl-C
raise e
except SystemExit, e: # sys.exit()
raise e
except Exception, e:
print 'ERROR, UNEXPECTED EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1) | gpl-3.0 |
B3AU/waveTree | examples/cluster/plot_ward_structured_vs_unstructured.py | 7 | 3079 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the position of its points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import pylab as pl
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import Ward
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1000
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = Ward(n_clusters=6).fit(X)
label = ward.labels_
print("Elapsed time: ", time.time() - st)
print("Number of points: ", label.size)
###############################################################################
# Plot result
fig = pl.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
              'o', color=pl.cm.jet(float(l) / np.max(label + 1)))
pl.title('Without connectivity constraints')
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = Ward(n_clusters=6, connectivity=connectivity).fit(X)
label = ward.labels_
print("Elapsed time: ", time.time() - st)
print("Number of points: ", label.size)
###############################################################################
# Plot result
fig = pl.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=pl.cm.jet(float(l) / np.max(label + 1)))
pl.title('With connectivity constraints')
pl.show()
| bsd-3-clause |
dominicelse/scipy | scipy/stats/_continuous_distns.py | 6 | 146462 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import inherit_docstring_from
from scipy import optimize
from scipy import integrate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_lazyselect, _lazywhere, _ncx2_cdf,
_ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - sc.smirnov(n, x)
def _ppf(self, q, n):
return sc.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - sc.kolmogorov(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
The survival function, ``norm.sf``, is also referred to as the
Q-function in some contexts (see, e.g.,
`Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition).
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@inherit_docstring_from(rv_continuous)
def fit(self, data, **kwds):
"""%(super)s
This function (norm_gen.fit) uses explicit formulas for the maximum
likelihood estimation of the parameters, so the `optimizer` argument
is ignored.
"""
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
    where ``Phi(a)`` is the normal CDF, ``x > 0``, and ``a > 0``.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for ``0 < x < 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
gamma(a+b) * x**(a-1) * (1-x)**(b-1)
beta.pdf(x, a, b) = ------------------------------------
gamma(a)*gamma(b)
for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
function (`scipy.special.gamma`).
`beta` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
"""%(super)s
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.
"""
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
kwds.get('fix_b', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
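        # Concretely, after rescaling the data to the open interval (0, 1),
        # the MLE for (a, b) solves
        #     psi(a) - psi(a + b) = mean(log(x))
        #     psi(b) - psi(a + b) = mean(log(1 - x))
        # where psi is the digamma function (see _beta_mle_ab above).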
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
`bradford` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
+ 6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr12 : Burr Type XII distribution
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
`burr` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0))
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr : Burr Type III distribution
Notes
-----
    The probability density function for `burr12` is::
burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
for ``x > 0``.
`burr12` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
    the Singh-Maddala distribution (see NIST [2]_).
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
`fisk` takes ``c`` as a shape parameter.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is::
fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
for ``x > 0``.
    `fisk` takes ``c`` as a shape parameter.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
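        # (The Cauchy distribution has no finite moments, so the sample median
        # and half the interquartile range are natural starting points for
        # the location and scale parameters.)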
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
`dgamma` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
    `dweibull` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = exp(-x)
for ``x >= 0``.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
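# A minimal doctest-style sketch (illustrative, not part of the library) of
# the rate parameterization noted in the docstring: ``scale = 1/lambda``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> lam, x = 2.0, 1.3
#     >>> np.isclose(stats.expon.pdf(x, scale=1.0/lam), lam * np.exp(-lam * x))
#     True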
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified Gaussian on Wikipedia.
class exponnorm_gen(rv_continuous):
"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is::
exponnorm.pdf(x, K) =
1/(2*K) exp(1/(2 * K**2)) exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))
where the shape parameter ``K > 0``.
It can be thought of as the sum of a normally distributed random
value with mean ``loc`` and sigma ``scale`` and an exponentially
distributed random number with a pdf proportional to ``exp(-lambda * x)``
where ``lambda = (K * scale)**(-1)``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\\mu`, :math:`\\lambda` and :math:`\\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\\mu` and :math:`\\sigma`, respectively, and
shape parameter :math:`K = 1/(\\sigma\\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
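# A minimal doctest-style sketch (illustrative) of the parameterization noted
# above: with ``loc=mu``, ``scale=sigma`` and shape ``K``, the mean is
# ``mu + K*sigma``, i.e. ``mu + 1/lambda`` since ``lambda = 1/(K*sigma)``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> K, mu, sigma = 1.5, 2.0, 0.5
#     >>> np.isclose(stats.exponnorm.mean(K, loc=mu, scale=sigma), mu + K * sigma)
#     True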
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
`exponweib` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
for ``x >= 0``, ``b > 0``. Note that this is a different distribution
from the exponential power distribution that is also known under the names
"generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x, c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
`fatiguelife` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha. And the latter one, below, passes the tests, while the wiki
# one doesn't. So far I didn't have the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
`foldcauchy` takes ``c`` as a shape parameter.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
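# A minimal doctest-style sketch (illustrative): for ``dfd > 2`` the mean of
# the F distribution is ``dfd / (dfd - 2)``, matching `_stats` above.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> dfn, dfd = 5.0, 10.0
#     >>> np.isclose(stats.f.mean(dfn, dfd), dfd / (dfd - 2.0))
#     True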
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is A = c * scale, where c is the shape parameter
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
`foldnorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# http://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
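# A minimal doctest-style sketch (illustrative) of the remark above: the
# half-normal is the folded normal with shape parameter ``c = 0``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.1, 4.0, 9)
#     >>> np.allclose(stats.foldnorm.pdf(x, 0.0), stats.halfnorm.pdf(x))
#     True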
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
`frechet_r` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
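# A minimal doctest-style sketch (illustrative): by `_munp` above, the n-th
# raw moment of `weibull_min` is ``gamma(1 + n/c)``; for n = 1 this is the mean.
#
#     >>> import numpy as np
#     >>> from scipy import stats, special
#     >>> c = 1.7
#     >>> np.isclose(stats.weibull_min.mean(c), special.gamma(1.0 + 1.0/c))
#     True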
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
`frechet_l` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
`genlogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
defined for ``x >= 0`` if ``c >= 0``, and for
``0 <= x <= -1/c`` if ``c < 0``.
`genpareto` takes ``c`` as a shape parameter.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`::
genpareto.pdf(x, c=0) = exp(-x)
For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::
genpareto.cdf(x, c=-1) = x
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
c = np.asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
return True
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
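# A minimal doctest-style sketch (illustrative) of the special case stated in
# the docstring: for ``c = 0`` the generalized Pareto reduces to `expon`.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.0, 5.0, 11)
#     >>> np.allclose(stats.genpareto.pdf(x, 0.0), stats.expon.pdf(x))
#     True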
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a, b, c > 0``.
`genexpon` takes ``a``, ``b`` and ``c`` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter ``c``.
`genextreme` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return np.where(abs(c) == np.inf, 0, 1)
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = np.where(c < -1./3, np.nan,
np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = np.where(c < -1./4, np.nan,
(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
`gamma` has a shape parameter `a` which needs to be set explicitly.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
# The skewness of the gamma distribution is `4 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) + np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
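# A minimal doctest-style sketch (illustrative) of the fixed-location fit
# path above; the sample values here are arbitrary.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> rng = np.random.RandomState(1234)
#     >>> data = rng.gamma(shape=2.5, scale=3.0, size=5000)
#     >>> a_hat, loc_hat, scale_hat = stats.gamma.fit(data, floc=0)
#     >>> loc_hat == 0
#     True
#
# With this many samples, ``a_hat`` and ``scale_hat`` should land close to
# the true 2.5 and 3.0.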
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
# Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
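# A minimal doctest-style sketch (illustrative) of the ``f0=<integer>``
# constraint described in the fit docstring above; with ``floc=0`` and the
# shape fixed, the fitted scale is simply ``data.mean() / f0``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> rng = np.random.RandomState(0)
#     >>> data = rng.gamma(shape=3.0, scale=2.0, size=2000)
#     >>> a_hat, loc_hat, scale_hat = stats.erlang.fit(data, f0=3, floc=0)
#     >>> a_hat == 3 and np.isclose(scale_hat, data.mean() / 3.0)
#     True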
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x >= 0``, ``a > 0``, and ``c != 0``.
`gengamma` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
# Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) =
2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
`genhalflogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return c > 0
def _pdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
`gompertz` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# http://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
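# A minimal doctest-style sketch (illustrative) of the relation stated in the
# `genextreme` docstring: for ``c = 0``, `genextreme` equals `gumbel_r`.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(-2.0, 5.0, 15)
#     >>> np.allclose(stats.genextreme.pdf(x, 0.0), stats.gumbel_r.pdf(x))
#     True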
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
`halfnorm` is a special case of `chi` with ``df == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi), 1-2.0/np.pi, np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
`gausshyper` takes ``a``, ``b``, ``c`` and ``z`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
`invgamma` takes ``a`` as a shape parameter.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
`invgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
When `mu` is too small, evaluating the cumulative distribution function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
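# A minimal doctest-style sketch (illustrative) of `_stats` above: the mean
# of `invgauss` is ``mu`` and the variance is ``mu**3``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> mu = 0.75
#     >>> np.isclose(stats.invgauss.mean(mu), mu)
#     True
#     >>> np.isclose(stats.invgauss.var(mu), mu**3)
#     True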
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
`invweibull` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.
`johnsonsb` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x, a, b > 0``, and `phi` is the normal pdf.
`johnsonsu` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(after_notes)s
%(example)s
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# http://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
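# A minimal doctest-style sketch (illustrative): as `_cdf` above shows, the
# logistic CDF is the standard sigmoid, i.e. `scipy.special.expit`.
#
#     >>> import numpy as np
#     >>> from scipy import stats, special
#     >>> x = np.linspace(-4.0, 4.0, 9)
#     >>> np.allclose(stats.logistic.cdf(x), special.expit(x))
#     True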
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all ``x, c > 0``.
`loggamma` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
`loglaplace` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
`lognorm` takes ``s`` as a shape parameter.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
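# A minimal doctest-style sketch (illustrative) of the parametrization note
# above: with ``s = sigma`` and ``scale = exp(mu)`` the median is ``exp(mu)``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> mu, sigma = 1.2, 0.4
#     >>> np.isclose(stats.lognorm.median(sigma, scale=np.exp(mu)), np.exp(mu))
#     True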
class gilbrat_gen(rv_continuous):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
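# A minimal doctest-style sketch (illustrative) of the special case stated in
# the docstring: `gilbrat` is `lognorm` with ``s = 1``.
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.1, 5.0, 9)
#     >>> np.allclose(stats.gilbrat.pdf(x), stats.lognorm.pdf(x, 1.0))
#     True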
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
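# A minimal doctest-style sketch (illustrative) of the note in the docstring:
# `maxwell` is a `chi` distribution with ``df = 3`` (and unit scale).
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> x = np.linspace(0.1, 4.0, 9)
#     >>> np.allclose(stats.maxwell.pdf(x), stats.chi.pdf(x, 3))
#     True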
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
(1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
if ``h`` and ``k`` are not equal to 0.
If ``h`` or ``k`` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes ``h`` and ``k`` as shape parameters.
The kappa4 distribution returns other distributions when certain
``h`` and ``k`` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
http://etd.lsu.edu/docs/available/etd-05182004-144851/unrestricted/Finney_dis.pdf
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - h**(-k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
self.a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
self.b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return h == h
def _pdf(self, x, h, k):
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
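# A minimal doctest-style sketch (illustrative) of one row of the table in
# the docstring: for ``h = 0`` the kappa4 CDF matches `genextreme` with the
# same ``k`` (values below are arbitrary, chosen inside the support x <= 1/k).
#
#     >>> import numpy as np
#     >>> from scipy import stats
#     >>> k = 0.5
#     >>> x = np.linspace(-1.0, 1.5, 9)
#     >>> np.allclose(stats.kappa4.cdf(x, 0.0, k), stats.genextreme.cdf(x, k))
#     True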
class kappa3_gen(rv_continuous):
"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is::
kappa3.pdf(x, a) =
a*[a + x**a]**(-(a + 1)/a), for ``x > 0``
0.0, for ``x <= 0``
`kappa3` takes ``a`` as a shape parameter and ``a > 0``.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012)
http://file.scirp.org/pdf/OJS20120400011_95789012.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
`nakagami` takes ``nu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return sc.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
(df2+df1*x)**(-(df1+df2)/2) *
gamma(df1/2)*gamma(1+df2/2) *
L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
(B(v1/2, v2/2) * gamma((v1+v2)/2))
for ``df1, df2, nc > 0``.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
`t` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
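# Hedged illustration (added; not part of the original source): a minimal
# sketch, using only the module-level `np`/`sc` imports, that evaluates the
# Student's t density quoted in the docstring above and compares it with
# `t.pdf`. The helper name and values are hypothetical; never called at import.
def _example_t_pdf_matches_formula():
    x, df = 0.5, 4.0
    direct = (sc.gamma((df + 1)/2) /
              (np.sqrt(np.pi*df) * sc.gamma(df/2) * (1 + x**2/df)**((df + 1)/2)))
    return np.allclose(t.pdf(x, df), direct)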
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``.
`nct` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
# Biometrika 48, p. 465 (1961).
# e.g. http://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
#kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
`pareto` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
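# Hedged illustration (added; not part of the original source): the Pareto mean
# and variance exist only for b > 1 and b > 2 respectively, which is what the
# masking in `_stats` above implements. A minimal sketch with hypothetical
# values (the helper is never called at import time):
def _example_pareto_moments():
    b = 3.0
    m, v = pareto.stats(b, moments='mv')
    # closed forms: mean = b/(b-1), variance = b/((b-2)*(b-1)**2)
    return np.allclose([m, v], [b/(b - 1.0), b/((b - 2.0)*(b - 1.0)**2)])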
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
(loc=-1.0).
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
`lomax` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
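# Hedged illustration (added; not part of the original source): the docstring
# states that `lomax` is a Pareto distribution shifted by loc=-1.0. A minimal
# sketch checking the two densities agree at a hypothetical point (the helper
# is never called at import time):
def _example_lomax_is_shifted_pareto():
    x, c = 1.5, 2.0
    return np.allclose(lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0))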
class pearson3_gen(rv_continuous):
"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is::
pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
(beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
where::
beta = 2 / (skew * stddev)
alpha = (stddev * beta)**2
zeta = loc - alpha / beta
`pearson3` takes ``skew`` as a shape parameter.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf                          GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta))    + (a - 1)*np.log(x)
# - beta*(x - zeta)                        - x
# - sc.gammaln(alpha)                      - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
`powerlaw` takes ``a`` as a shape parameter.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
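# Hedged illustration (added; not part of the original source): the note above
# says `powerlaw` is a special case of `beta` with b == 1. Assuming the `beta`
# distribution instance defined earlier in this module, a minimal sketch with a
# hypothetical point (the helper is never called at import time):
def _example_powerlaw_is_beta_special_case():
    x, a = 0.3, 2.5
    return np.allclose(powerlaw.pdf(x, a), beta.pdf(x, a, 1.0))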
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
`powerlognorm` takes ``c`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
`powernorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
`rdist` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``x >= 0``.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
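# Hedged illustration (added; not part of the original source): `rayleigh` is
# documented as `chi` with df == 2, and `_rvs` above already samples through
# `chi`. A minimal sketch comparing the two densities at a hypothetical point
# (never called at import time):
def _example_rayleigh_is_chi_df2():
    x = 1.2
    return np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))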
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
`reciprocal` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = np.log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(self.d)
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
`rice` takes ``b`` as a shape parameter.
%(after_notes)s
The Rice distribution describes the length, ``r``, of a 2-D vector
with components ``(U+u, V+v)``, where ``U, V`` are constant, ``u, v``
are independent Gaussian random variables with standard deviation
``s``. Let ``R = (U**2 + V**2)**0.5``. Then the pdf of ``r`` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# http://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
`recipinvgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2*norm.pdf(x)*norm.cdf(ax)
`skewnorm` takes ``a`` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution.
rvs implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapz_gen(rv_continuous):
"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes ``c`` and ``d`` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1], with ``c`` and ``d`` the fractions of
that range where the constant (modal) region begins and ends.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d - c + 1)
condlist = [x < c, x <= d, x > d]
choicelist = [u * x / c, u, u * (1 - x) / (1 - d)]
return np.select(condlist, choicelist)
def _cdf(self, x, c, d):
condlist = [x < c, x <= d, x > d]
choicelist = [x**2 / c / (d - c + 1),
(c + 2 * (x - c)) / (d - c + 1),
1 - ((1 - x)**2 / (d - c + 1) / (1 - d))]
return np.select(condlist, choicelist)
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
`triang` takes ``c`` as a shape parameter.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return np.where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return np.where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c*q), 1-np.sqrt((1-c)*(1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
`truncexpon` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
self.b = b
return b > 0
def _pdf(self, x, b):
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
self._delta = np.where(self.a > 0,
-(self._sb - self._sa),
self._nb - self._na)
self._logdelta = np.log(self._delta)
return a != b
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ppf = np.where(self.a > 0,
_norm_isf(q*self._sb + self._sa*(1.0-q)),
_norm_ppf(q*self._nb + self._na*(1.0-q)))
return ppf
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
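# Hedged illustration (added; not part of the original source): a minimal
# sketch of the clip-value conversion described in the truncnorm docstring
# above. The mean/std/clip values are hypothetical and the helper is never
# called at import time.
def _example_truncnorm_clip_conversion():
    my_mean, my_std = 5.0, 2.0
    myclip_a, myclip_b = 3.0, 9.0
    # convert the desired clip points into standard-normal units
    a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    return truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=10)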
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
`tukeylambda` takes ``lam`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
for ``-pi <= x <= pi``, ``kappa > 0``.
`vonmises` takes ``kappa`` as a shape parameter.
%(after_notes)s
See Also
--------
vonmises_line : The same distribution, defined on a [-pi, pi] segment
of the real line.
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
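# Hedged illustration (added; not part of the original source): `wald` is
# documented as `invgauss` with mu == 1, and its methods above simply delegate
# to `invgauss`. A minimal sketch at a hypothetical point (never called at
# import time):
def _example_wald_is_invgauss_mu1():
    x = 0.7
    return np.allclose(wald.pdf(x), invgauss.pdf(x, 1.0))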
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
`wrapcauchy` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_::
beta
gennorm.pdf(x, beta) = --------------- exp(-|x|**beta)
2 gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to a Laplace distribution.
For ``beta = 2``, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
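# Hedged illustration (added; not part of the original source): for beta = 2
# the docstring says `gennorm` reduces to a normal distribution with scale
# 1/sqrt(2). A minimal sketch using the `norm` instance referenced earlier in
# this module, at a hypothetical point (never called at import time):
def _example_gennorm_beta2_is_scaled_normal():
    x = 0.8
    return np.allclose(gennorm.pdf(x, 2), norm.pdf(x, scale=1/np.sqrt(2)))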
class halfgennorm_gen(rv_continuous):
"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is::
beta
halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to an exponential distribution.
For ``beta = 2``, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
def _argus_phi(chi):
"""
Utility function for the argus distribution
used in the CDF and norm of the Argus function
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is::
argus.pdf(x, chi) = chi**3 / (sqrt(2*pi) * Psi(chi)) * x * sqrt(1-x**2) *
                    exp(- 0.5 * chi**2 * (1 - x**2))
where:
Psi(chi) = Phi(chi) - chi * phi(chi) - 1/2
with Phi and phi being the CDF and PDF of a standard normal distribution, respectively.
`argus` takes ``chi`` as a shape parameter.
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, chi):
"""
Return PDF of the argus function
"""
y = 1.0 - x**2
return chi**3 / (_norm_pdf_C * _argus_phi(chi)) * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
"""
Return CDF of the argus function
"""
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
"""
Return survival function of the argus function
"""
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
datasample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
datasample.
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
In particular, the return value of np.histogram is accepted
Notes
-----
There are no additional shape parameters except for the loc and scale.
The pdf is defined as a stepwise function from the provided histogram.
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, normed=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
Tuple containing two array_like objects
The first containing the content of n bins
The second containing the (n+1) bin boundaries
In particular, the return value of np.histogram is accepted
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self._hbins[0]
kwargs['b'] = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| bsd-3-clause |
planetarymike/IDL-Colorbars | IDL_py_test/031_Peppermint.py | 1 | 7852 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[0., 0., 0.313725],
[0., 0., 0.313725],
[0., 0., 0.627451],
[0., 0., 0.941176],
[0.313725, 0., 0.],
[0.313725, 0., 0.313725],
[0.313725, 0., 0.627451],
[0.313725, 0., 0.941176],
[0.627451, 0., 0.],
[0.627451, 0., 0.313725],
[0.627451, 0., 0.627451],
[0.627451, 0., 0.941176],
[0.941176, 0., 0.],
[0.941176, 0., 0.313725],
[0.941176, 0., 0.627451],
[0.941176, 0., 0.941176],
[0., 0.0627451, 0.],
[0., 0.0627451, 0.313725],
[0., 0.0627451, 0.627451],
[0., 0.0627451, 0.941176],
[0.313725, 0.0627451, 0.],
[0.313725, 0.0627451, 0.313725],
[0.313725, 0.0627451, 0.627451],
[0.313725, 0.0627451, 0.941176],
[0.627451, 0.0627451, 0.],
[0.627451, 0.0627451, 0.313725],
[0.627451, 0.0627451, 0.627451],
[0.627451, 0.0627451, 0.941176],
[0.941176, 0.0627451, 0.],
[0.941176, 0.0627451, 0.313725],
[0.941176, 0.0627451, 0.627451],
[0.941176, 0.0627451, 0.941176],
[0., 0.12549, 0.],
[0., 0.12549, 0.313725],
[0., 0.12549, 0.627451],
[0., 0.12549, 0.941176],
[0.313725, 0.12549, 0.],
[0.313725, 0.12549, 0.313725],
[0.313725, 0.12549, 0.627451],
[0.313725, 0.12549, 0.941176],
[0.627451, 0.12549, 0.],
[0.627451, 0.12549, 0.313725],
[0.627451, 0.12549, 0.627451],
[0.627451, 0.12549, 0.941176],
[0.941176, 0.12549, 0.],
[0.941176, 0.12549, 0.313725],
[0.941176, 0.12549, 0.627451],
[0.941176, 0.12549, 0.941176],
[0., 0.188235, 0.],
[0., 0.188235, 0.313725],
[0., 0.188235, 0.627451],
[0., 0.188235, 0.941176],
[0.313725, 0.188235, 0.],
[0.313725, 0.188235, 0.313725],
[0.313725, 0.188235, 0.627451],
[0.313725, 0.188235, 0.941176],
[0.627451, 0.188235, 0.],
[0.627451, 0.188235, 0.313725],
[0.627451, 0.188235, 0.627451],
[0.627451, 0.188235, 0.941176],
[0.941176, 0.188235, 0.],
[0.941176, 0.188235, 0.313725],
[0.941176, 0.188235, 0.627451],
[0.941176, 0.188235, 0.941176],
[0., 0.25098, 0.],
[0., 0.25098, 0.313725],
[0., 0.25098, 0.627451],
[0., 0.25098, 0.941176],
[0.313725, 0.25098, 0.],
[0.313725, 0.25098, 0.313725],
[0.313725, 0.25098, 0.627451],
[0.313725, 0.25098, 0.941176],
[0.627451, 0.25098, 0.],
[0.627451, 0.25098, 0.313725],
[0.627451, 0.25098, 0.627451],
[0.627451, 0.25098, 0.941176],
[0.941176, 0.25098, 0.],
[0.941176, 0.25098, 0.313725],
[0.941176, 0.25098, 0.627451],
[0.941176, 0.25098, 0.941176],
[0., 0.313725, 0.],
[0., 0.313725, 0.313725],
[0., 0.313725, 0.627451],
[0., 0.313725, 0.941176],
[0.313725, 0.313725, 0.],
[0.313725, 0.313725, 0.313725],
[0.313725, 0.313725, 0.627451],
[0.313725, 0.313725, 0.941176],
[0.627451, 0.313725, 0.],
[0.627451, 0.313725, 0.313725],
[0.627451, 0.313725, 0.627451],
[0.627451, 0.313725, 0.941176],
[0.941176, 0.313725, 0.],
[0.941176, 0.313725, 0.313725],
[0.941176, 0.313725, 0.627451],
[0.941176, 0.313725, 0.941176],
[0., 0.376471, 0.],
[0., 0.376471, 0.313725],
[0., 0.376471, 0.627451],
[0., 0.376471, 0.941176],
[0.313725, 0.376471, 0.],
[0.313725, 0.376471, 0.313725],
[0.313725, 0.376471, 0.627451],
[0.313725, 0.376471, 0.941176],
[0.627451, 0.376471, 0.],
[0.627451, 0.376471, 0.313725],
[0.627451, 0.376471, 0.627451],
[0.627451, 0.376471, 0.941176],
[0.941176, 0.376471, 0.],
[0.941176, 0.376471, 0.313725],
[0.941176, 0.376471, 0.627451],
[0.941176, 0.376471, 0.941176],
[0., 0.439216, 0.],
[0., 0.439216, 0.313725],
[0., 0.439216, 0.627451],
[0., 0.439216, 0.941176],
[0.313725, 0.439216, 0.],
[0.313725, 0.439216, 0.313725],
[0.313725, 0.439216, 0.627451],
[0.313725, 0.439216, 0.941176],
[0.627451, 0.439216, 0.],
[0.627451, 0.439216, 0.313725],
[0.627451, 0.439216, 0.627451],
[0.627451, 0.439216, 0.941176],
[0.941176, 0.439216, 0.],
[0.941176, 0.439216, 0.313725],
[0.941176, 0.439216, 0.627451],
[0.941176, 0.439216, 0.941176],
[0., 0.501961, 0.],
[0., 0.501961, 0.313725],
[0., 0.501961, 0.627451],
[0., 0.501961, 0.941176],
[0.313725, 0.501961, 0.],
[0.313725, 0.501961, 0.313725],
[0.313725, 0.501961, 0.627451],
[0.313725, 0.501961, 0.941176],
[0.627451, 0.501961, 0.],
[0.627451, 0.501961, 0.313725],
[0.627451, 0.501961, 0.627451],
[0.627451, 0.501961, 0.941176],
[0.941176, 0.501961, 0.],
[0.941176, 0.501961, 0.313725],
[0.941176, 0.501961, 0.627451],
[0.941176, 0.501961, 0.941176],
[0., 0.564706, 0.],
[0., 0.564706, 0.313725],
[0., 0.564706, 0.627451],
[0., 0.564706, 0.941176],
[0.313725, 0.564706, 0.],
[0.313725, 0.564706, 0.313725],
[0.313725, 0.564706, 0.627451],
[0.313725, 0.564706, 0.941176],
[0.627451, 0.564706, 0.],
[0.627451, 0.564706, 0.313725],
[0.627451, 0.564706, 0.627451],
[0.627451, 0.564706, 0.941176],
[0.941176, 0.564706, 0.],
[0.941176, 0.564706, 0.313725],
[0.941176, 0.564706, 0.627451],
[0.941176, 0.564706, 0.941176],
[0., 0.627451, 0.],
[0., 0.627451, 0.313725],
[0., 0.627451, 0.627451],
[0., 0.627451, 0.941176],
[0.313725, 0.627451, 0.],
[0.313725, 0.627451, 0.313725],
[0.313725, 0.627451, 0.627451],
[0.313725, 0.627451, 0.941176],
[0.627451, 0.627451, 0.],
[0.627451, 0.627451, 0.313725],
[0.627451, 0.627451, 0.627451],
[0.627451, 0.627451, 0.941176],
[0.941176, 0.627451, 0.],
[0.941176, 0.627451, 0.313725],
[0.941176, 0.627451, 0.627451],
[0.941176, 0.627451, 0.941176],
[0., 0.690196, 0.],
[0., 0.690196, 0.313725],
[0., 0.690196, 0.627451],
[0., 0.690196, 0.941176],
[0.313725, 0.690196, 0.],
[0.313725, 0.690196, 0.313725],
[0.313725, 0.690196, 0.627451],
[0.313725, 0.690196, 0.941176],
[0.627451, 0.690196, 0.],
[0.627451, 0.690196, 0.313725],
[0.627451, 0.690196, 0.627451],
[0.627451, 0.690196, 0.941176],
[0.941176, 0.690196, 0.],
[0.941176, 0.690196, 0.313725],
[0.941176, 0.690196, 0.627451],
[0.941176, 0.690196, 0.941176],
[0., 0.752941, 0.],
[0., 0.752941, 0.313725],
[0., 0.752941, 0.627451],
[0., 0.752941, 0.941176],
[0.313725, 0.752941, 0.],
[0.313725, 0.752941, 0.313725],
[0.317647, 0.756863, 0.627451],
[0.313725, 0.752941, 0.941176],
[0.627451, 0.752941, 0.],
[0.627451, 0.752941, 0.313725],
[0.627451, 0.752941, 0.627451],
[0.627451, 0.752941, 0.941176],
[0.941176, 0.752941, 0.],
[0.941176, 0.752941, 0.313725],
[0.941176, 0.752941, 0.627451],
[0.941176, 0.752941, 0.941176],
[0., 0.815686, 0.],
[0., 0.815686, 0.313725],
[0., 0.815686, 0.627451],
[0., 0.815686, 0.941176],
[0.313725, 0.815686, 0.],
[0.313725, 0.815686, 0.313725],
[0.313725, 0.815686, 0.627451],
[0.313725, 0.815686, 0.941176],
[0.627451, 0.815686, 0.],
[0.627451, 0.815686, 0.313725],
[0.627451, 0.815686, 0.627451],
[0.627451, 0.815686, 0.941176],
[0.941176, 0.815686, 0.],
[0.941176, 0.815686, 0.313725],
[0.941176, 0.815686, 0.627451],
[0.941176, 0.815686, 0.941176],
[0., 0.878431, 0.],
[0., 0.878431, 0.313725],
[0., 0.878431, 0.627451],
[0., 0.878431, 0.941176],
[0.313725, 0.878431, 0.],
[0.313725, 0.878431, 0.313725],
[0.313725, 0.878431, 0.627451],
[0.313725, 0.878431, 0.941176],
[0.627451, 0.878431, 0.],
[0.627451, 0.878431, 0.313725],
[0.627451, 0.878431, 0.627451],
[0.627451, 0.878431, 0.941176],
[0.941176, 0.878431, 0.],
[0.941176, 0.878431, 0.313725],
[0.941176, 0.878431, 0.627451],
[0.941176, 0.878431, 0.941176],
[0., 0.941176, 0.],
[0., 0.941176, 0.313725],
[0., 0.941176, 0.627451],
[0., 0.941176, 0.941176],
[0.313725, 0.941176, 0.],
[0.313725, 0.941176, 0.313725],
[0.313725, 0.941176, 0.627451],
[0.313725, 0.941176, 0.941176],
[0.627451, 0.941176, 0.],
[0.627451, 0.941176, 0.313725],
[0.627451, 0.941176, 0.627451],
[0.627451, 0.941176, 0.941176],
[0.941176, 0.941176, 0.],
[0.941176, 0.941176, 0.313725],
[0.941176, 0.941176, 0.627451],
[0.941176, 0.941176, 0.627451]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
dgwakeman/mne-python | examples/inverse/plot_lcmv_beamformer.py | 18 | 2801 | """
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on an evoked dataset for three different
choices of source orientation and store the solutions in stc files for
visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
| bsd-3-clause |
waynenilsen/statsmodels | setup.py | 16 | 15962 | """
Much of the build system code was adapted from work done by the pandas
developers [1], which was in turn based on work done in pyzmq [2] and lxml [3].
[1] http://pandas.pydata.org
[2] http://zeromq.github.io/pyzmq/
[3] http://lxml.de/
"""
import os
from os.path import relpath, join as pjoin
import sys
import subprocess
import re
from distutils.version import StrictVersion
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
no_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')))
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command, find_packages
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
if _have_setuptools:
setuptools_kwargs = {"zip_safe": False,
"test_suite": "nose.collector"}
else:
setuptools_kwargs = {}
if sys.version_info[0] >= 3:
sys.exit("Need setuptools to install statsmodels for Python 3.x")
curdir = os.path.abspath(os.path.dirname(__file__))
README = open(pjoin(curdir, "README.rst")).read()
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for use with SciPy'
LONG_DESCRIPTION = README
MAINTAINER = 'Skipper Seabold, Josef Perktold'
MAINTAINER_EMAIL ='[email protected]'
URL = 'http://statsmodels.sourceforge.net/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
# These imports need to be here; setuptools needs to be imported first.
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
not numpy_incl in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'statsmodels'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def check_dependency_versions(min_versions):
"""
Don't let pip/setuptools do this all by itself. It's rude.
For all dependencies, try to import them and check if the versions of
installed dependencies match the minimum version requirements. If
installed but version too low, raise an error. If not installed at all,
return the correct ``setup_requires`` and ``install_requires`` arguments to
be added to the setuptools kwargs. This prevents upgrading installed
dependencies like numpy (that should be an explicit choice by the user and
never happen automatically), but make things work when installing into an
empty virtualenv for example.
"""
setup_requires = []
install_requires = []
try:
from numpy.version import short_version as npversion
except ImportError:
setup_requires.append('numpy')
install_requires.append('numpy')
else:
if not (StrictVersion(strip_rc(npversion)) >= min_versions['numpy']):
raise ImportError("Numpy version is %s. Requires >= %s" %
(npversion, min_versions['numpy']))
try:
import scipy
except ImportError:
install_requires.append('scipy')
else:
try:
from scipy.version import short_version as spversion
except ImportError:
from scipy.version import version as spversion # scipy 0.7.0
if not (StrictVersion(strip_rc(spversion)) >= min_versions['scipy']):
raise ImportError("Scipy version is %s. Requires >= %s" %
(spversion, min_versions['scipy']))
try:
from pandas.version import short_version as pversion
except ImportError:
install_requires.append('pandas')
else:
if not (StrictVersion(strip_rc(pversion)) >= min_versions['pandas']):
ImportError("Pandas version is %s. Requires >= %s" %
(pversion, min_versions['pandas']))
try:
from patsy import __version__ as patsy_version
except ImportError:
install_requires.append('patsy')
else:
# patsy dev looks like 0.1.0+dev
pversion = re.match("\d*\.\d*\.\d*", patsy_version).group()
if not (StrictVersion(pversion) >= min_versions['patsy']):
raise ImportError("Patsy version is %s. Requires >= %s" %
(pversion, min_versions["patsy"]))
return setup_requires, install_requires
MAJ = 0
MIN = 7
REV = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJ,MIN,REV)
classifiers = [ 'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering']
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(" ".join(cmd), stdout = subprocess.PIPE, env=env,
shell=True).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):
cnt = "\n".join(["",
"# THIS FILE IS GENERATED FROM SETUP.PY",
"short_version = '%(version)s'",
"version = '%(version)s'",
"full_version = '%(full_version)s'",
"git_revision = '%(git_revision)s'",
"release = %(isrelease)s", "",
"if not release:",
" version = full_version"])
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
dowrite = True
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from statsmodels.version import git_revision as GIT_REVISION
except ImportError:
dowrite = False
GIT_REVISION = "Unknown"
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
if dowrite:
try:
a = open(filename, 'w')
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = ["bspline_ext.c",
"bspline_impl.c"]
for root, dirs, files in list(os.walk('statsmodels')):
for f in files:
if f in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build',):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
import shutil
shutil.rmtree(clean_tree)
except Exception:
pass
class CheckingBuildExt(build_ext):
"""Subclass build_ext to get clearer report if Cython is necessary."""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile statsmodels from a development branch.
Please install Cython or download a source release of statsmodels.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass = {'clean': CleanCommand,
'build': build}
cmdclass["build_src"] = DummyBuildSrc
cmdclass["build_ext"] = CheckingBuildExt
# some linux distros require it
#NOTE: we are not currently using this but add it to Extension, if needed.
# libraries = ['m'] if 'win32' not in sys.platform else []
from numpy.distutils.misc_util import get_info
npymath_info = get_info("npymath")
ext_data = dict(
kalman_loglike = {"name" : "statsmodels/tsa/kalmanf/kalman_loglike.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"],
"sources" : []},
_statespace = {"name" : "statsmodels/tsa/statespace/_statespace.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"] + npymath_info['include_dirs'],
"libraries": npymath_info['libraries'],
"library_dirs": npymath_info['library_dirs'],
"sources" : []},
linbin = {"name" : "statsmodels/nonparametric/linbin.c",
"depends" : [],
"sources" : []},
_smoothers_lowess = {"name" : "statsmodels/nonparametric/_smoothers_lowess.c",
"depends" : [],
"sources" : []}
)
extensions = []
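# Build a numpy.distutils Extension object for each entry above; the dotted
# module name (e.g. statsmodels.tsa.kalmanf.kalman_loglike) is derived from
# the directory of the generated C source listed in "name".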
for name, data in ext_data.items():
data['sources'] = data.get('sources', []) + [data['name']]
destdir = ".".join(os.path.dirname(data["name"]).split("/"))
data.pop('name')
obj = Extension('%s.%s' % (destdir, name), **data)
extensions.append(obj)
def get_data_files():
sep = os.path.sep
# install the datasets
data_files = {}
root = pjoin(curdir, "statsmodels", "datasets")
for i in os.listdir(root):
        if i == "tests":
continue
path = pjoin(root, i)
if os.path.isdir(path):
data_files.update({relpath(path, start=curdir).replace(sep, ".") : ["*.csv",
"*.dta"]})
# add all the tests and results files
for r, ds, fs in os.walk(pjoin(curdir, "statsmodels")):
r_ = relpath(r, start=curdir)
if r_.endswith('results'):
data_files.update({r_.replace(sep, ".") : ["*.csv",
"*.txt"]})
return data_files
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.unlink('MANIFEST')
min_versions = {
'numpy' : '1.4.0',
'scipy' : '0.7.0',
'pandas' : '0.7.1',
'patsy' : '0.1.0',
}
if sys.version_info[0] == 3 and sys.version_info[1] >= 3:
# 3.3 needs numpy 1.7+
min_versions.update({"numpy" : "1.7.0b2"})
(setup_requires,
install_requires) = check_dependency_versions(min_versions)
if _have_setuptools:
setuptools_kwargs['setup_requires'] = setup_requires
setuptools_kwargs['install_requires'] = install_requires
write_version_py()
# this adds *.csv and *.dta files in datasets folders
# and *.csv and *.txt files in test/results folders
package_data = get_data_files()
packages = find_packages()
packages.append("statsmodels.tsa.vector_ar.data")
package_data["statsmodels.datasets.tests"].append("*.zip")
package_data["statsmodels.iolib.tests.results"].append("*.dta")
package_data["statsmodels.stats.tests.results"].append("*.json")
package_data["statsmodels.tsa.vector_ar.tests.results"].append("*.npz")
# data files that don't follow the tests/results pattern. should fix.
package_data.update({"statsmodels.stats.tests" : ["*.txt"]})
package_data.update({"statsmodels.stats.libqsturng" :
["*.r", "*.txt", "*.dat"]})
package_data.update({"statsmodels.stats.libqsturng.tests" :
["*.csv", "*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
# temporary, until moved:
package_data.update({"statsmodels.sandbox.regression.tests" :
["*.dta", "*.csv"]})
#TODO: deal with this. Not sure if it ever worked for bdists
#('docs/build/htmlhelp/statsmodelsdoc.chm',
# 'statsmodels/statsmodelsdoc.chm')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:
# Generate Cython sources, unless building from source release
generate_cython()
setup(name = DISTNAME,
version = VERSION,
maintainer = MAINTAINER,
ext_modules = extensions,
maintainer_email = MAINTAINER_EMAIL,
description = DESCRIPTION,
license = LICENSE,
url = URL,
download_url = DOWNLOAD_URL,
long_description = LONG_DESCRIPTION,
classifiers = classifiers,
platforms = 'any',
cmdclass = cmdclass,
packages = packages,
package_data = package_data,
include_package_data=False, # True will install all files in repo
**setuptools_kwargs)
| bsd-3-clause |
miloharper/neural-network-animation | matplotlib/tests/test_patches.py | 9 | 8388 | """
Tests specific to the patches module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.collections as mcollections
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
def test_Polygon_close():
#: Github issue #1018 identified a bug in the Polygon handling
#: of the closed attribute; the path was not getting closed
#: when set_xy was used to set the vertices.
# open set of vertices:
xy = [[0, 0], [0, 1], [1, 1]]
# closed set:
xyclosed = xy + [[0, 0]]
# start with open path and close it:
p = Polygon(xy, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xyclosed)
# start with closed path and open it:
p = Polygon(xyclosed, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xy)
# start with open path and leave it open:
p = Polygon(xy, closed=False)
assert_array_equal(p.get_xy(), xy)
p.set_xy(xy)
assert_array_equal(p.get_xy(), xy)
# start with closed path and leave it closed:
p = Polygon(xyclosed, closed=True)
assert_array_equal(p.get_xy(), xyclosed)
p.set_xy(xyclosed)
assert_array_equal(p.get_xy(), xyclosed)
def test_rotate_rect():
loc = np.asarray([1.0, 2.0])
width = 2
height = 3
angle = 30.0
# A rotated rectangle
rect1 = Rectangle(loc, width, height, angle=angle)
# A non-rotated rectangle
rect2 = Rectangle(loc, width, height)
# Set up an explicit rotation matrix (in radians)
angle_rad = np.pi * angle / 180.0
rotation_matrix = np.array([[np.cos(angle_rad), -np.sin(angle_rad)],
[np.sin(angle_rad), np.cos(angle_rad)]])
# Translate to origin, rotate each vertex, and then translate back
new_verts = np.inner(rotation_matrix, rect2.get_verts() - loc).T + loc
# They should be the same
assert_almost_equal(rect1.get_verts(), new_verts)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-18, 20])
ax.set_ylim([-150, 100])
path = mpath.Path.unit_regular_star(8).deepcopy()
path.vertices *= [10, 100]
path.vertices -= [5, 25]
path2 = mpath.Path.unit_circle().deepcopy()
path2.vertices *= [10, 100]
path2.vertices += [10, -25]
combined = mpath.Path.make_compound_path(path, path2)
patch = mpatches.PathPatch(
combined, alpha=0.5, facecolor='coral', edgecolor='none')
ax.add_patch(patch)
bbox = mtrans.Bbox([[-12, -77.5], [50, -110]])
result_path = combined.clip_to_bbox(bbox)
result_patch = mpatches.PathPatch(
result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
"""
Test checks that the patch and collection are rendered with the specified
alpha values in their facecolor and edgecolor.
"""
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
#: Test checks that specifying an alpha attribute for a patch or
#: collection will override any alpha component of the facecolor
#: or edgecolor.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles='dashdot',
alpha=0.25,
facecolor=(1, 0, 0, 0.5),
edgecolor=(0, 0, 1, 0.75))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_custom_linestyle'],
remove_text=True)
def test_patch_custom_linestyle():
#: A test to check that patches and collections accept custom dash
#: patterns as linestyle and that they display correctly.
star = mpath.Path.unit_regular_star(6)
circle = mpath.Path.unit_circle()
# concatenate the star with an internal cutout of the circle
verts = np.concatenate([circle.vertices, star.vertices[::-1]])
codes = np.concatenate([circle.codes, star.codes])
cut_star1 = mpath.Path(verts, codes)
cut_star2 = mpath.Path(verts + 1, codes)
ax = plt.axes()
patch = mpatches.PathPatch(cut_star1,
linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_patch(patch)
col = mcollections.PathCollection([cut_star2],
linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
facecolor=(1, 0, 0),
edgecolor=(0, 0, 1))
ax.add_collection(col)
ax.set_xlim([-1, 2])
ax.set_ylim([-1, 2])
def test_wedge_movement():
param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
'r': (5, 8, 'set_radius'),
'width': (2, 3, 'set_width'),
'theta1': (0, 30, 'set_theta1'),
'theta2': (45, 50, 'set_theta2')}
init_args = dict((k, v[0]) for (k, v) in six.iteritems(param_dict))
w = mpatches.Wedge(**init_args)
for attr, (old_v, new_v, func) in six.iteritems(param_dict):
assert_equal(getattr(w, attr), old_v)
getattr(w, func)(new_v)
assert_equal(getattr(w, attr), new_v)
@image_comparison(baseline_images=['wedge_range'],
remove_text=True)
def test_wedge_range():
ax = plt.axes()
t1 = 2.313869244286224
args = [[52.31386924, 232.31386924],
[52.313869244286224, 232.31386924428622],
[t1, t1 + 180.0],
[0, 360],
[90, 90 + 360],
[-180, 180],
[0, 380],
[45, 46],
[46, 45]]
for i, (theta1, theta2) in enumerate(args):
x = i % 3
y = i // 3
wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
facecolor='none', edgecolor='k', lw=3)
ax.add_artist(wedge)
ax.set_xlim([-2, 8])
ax.set_ylim([-2, 9])
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
tmhm/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display first what a K-means algorithm would yield
using three clusters. They then show the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (default is 10), the number of
times that the algorithm is run with different centroid
seeds is reduced.
The next plot displays the result of using eight clusters, and
finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
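    # With three classes, the per-class probabilities returned above map
    # directly onto the red/green/blue channels of the image drawn by imshow.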
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
ihmeuw/vivarium | src/vivarium/examples/boids/location.py | 1 | 1167 | import numpy as np
import pandas as pd
class Location:
configuration_defaults = {
'location': {
'width': 1000, # Width of our field
'height': 1000, # Height of our field
}
}
def __init__(self):
self.name = 'location'
def setup(self, builder):
self.width = builder.configuration.location.width
self.height = builder.configuration.location.height
columns_created = ['x', 'vx', 'y', 'vy']
builder.population.initializes_simulants(self.on_create_simulants, columns_created)
self.population_view = builder.population.get_view(columns_created)
def on_create_simulants(self, pop_data):
count = len(pop_data.index)
# Start clustered in the center with small random velocities
new_population = pd.DataFrame({
'x': self.width * (0.4 + 0.2 * np.random.random(count)),
'y': self.height * (0.4 + 0.2 * np.random.random(count)),
'vx': -0.5 + np.random.random(count),
'vy': -0.5 + np.random.random(count),
        }, index=pop_data.index)
self.population_view.update(new_population)
| gpl-3.0 |
fabioticconi/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
lbishal/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
jseabold/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_1.py | 5 | 2569 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
Colors of lines in the graphs are not great.
Uses DataFrame and WidePanel to hold data downloaded from Yahoo using
matplotlib.finance.
I have not figured out storage, so the download happens at each run
of the script.
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
import os
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
def getquotes(symbol, start, end):
# Taken from the no-longer-existent pandas.examples.finance
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pd.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pd.DataFrame(data, index=dates)
start_date = dt.datetime(2007, 1, 1)
end_date = dt.datetime(2009, 12, 31)
dj30 = ['MMM', 'AA', 'AXP', 'T', 'BAC', 'BA', 'CAT', 'CVX', 'CSCO',
'KO', 'DD', 'XOM', 'GE', 'HPQ', 'HD', 'INTC', 'IBM', 'JNJ',
'JPM', 'KFT', 'MCD', 'MRK', 'MSFT', 'PFE', 'PG', 'TRV',
'UTX', 'VZ', 'WMT', 'DIS']
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in dj30:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pd.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
if not os.path.exists('dj30rr'):
    # if pandas is updated, unpickling sometimes fails and we need to save again
paclose_ratereturn.save('dj30rr')
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
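# A hedged alternative sketch: recent pandas versions can compute a centered
# 5-day moving average directly, without numpy convolution (kept as a comment
# so the original approach above stays unchanged):
#
#     paclose_ratereturn_vol.rolling(window=5, center=True).mean()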
#plt.show()
| bsd-3-clause |
hlin117/scikit-learn | sklearn/tests/test_multioutput.py | 23 | 12429 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.linear_model import Lasso
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
def test_multi_target_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
for n in range(3):
rgr = GradientBoostingRegressor(random_state=0)
rgr.fit(X_train, y_train[:, n])
references[:, n] = rgr.predict(X_test)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X_train, y_train)
y_pred = rgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_partial_fit():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test, y_test = X[50:], y[50:]
references = np.zeros_like(y_test)
half_index = 25
for n in range(3):
sgr = SGDRegressor(random_state=0)
sgr.partial_fit(X_train[:half_index], y_train[:half_index, n])
sgr.partial_fit(X_train[half_index:], y_train[half_index:, n])
references[:, n] = sgr.predict(X_test)
sgr = MultiOutputRegressor(SGDRegressor(random_state=0))
sgr.partial_fit(X_train[:half_index], y_train[:half_index])
sgr.partial_fit(X_train[half_index:], y_train[half_index:])
y_pred = sgr.predict(X_test)
assert_almost_equal(references, y_pred)
def test_multi_target_regression_one_target():
# Test multi target regression raises
X, y = datasets.make_regression(n_targets=1)
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
assert_raises(ValueError, rgr.fit, X, y)
def test_multi_target_sparse_regression():
X, y = datasets.make_regression(n_targets=3)
X_train, y_train = X[:50], y[:50]
X_test = X[50:]
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
rgr = MultiOutputRegressor(Lasso(random_state=0))
rgr_sparse = MultiOutputRegressor(Lasso(random_state=0))
rgr.fit(X_train, y_train)
rgr_sparse.fit(sparse(X_train), y_train)
assert_almost_equal(rgr.predict(X_test),
rgr_sparse.predict(sparse(X_test)))
def test_multi_target_sample_weights_api():
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [0.8, 0.6]
rgr = MultiOutputRegressor(Lasso())
assert_raises_regex(ValueError, "does not support sample weights",
rgr.fit, X, y, w)
# no exception should be raised if the base estimator supports weights
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y, w)
def test_multi_target_sample_weight_partial_fit():
# weighted regressor
X = [[1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr_w.partial_fit(X, y, w)
# weighted with different weights
w = [2., 2.]
rgr = MultiOutputRegressor(SGDRegressor(random_state=0))
rgr.partial_fit(X, y, w)
assert_not_equal(rgr.predict(X)[0][0], rgr_w.predict(X)[0][0])
def test_multi_target_sample_weights():
# weighted regressor
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3.141, 2.718], [2.718, 3.141]]
w = [2., 1.]
rgr_w = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3.141, 2.718], [3.141, 2.718], [2.718, 3.141]]
rgr = MultiOutputRegressor(GradientBoostingRegressor(random_state=0))
rgr.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(rgr.predict(X_test), rgr_w.predict(X_test))
# Import the data
iris = datasets.load_iris()
# create multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
def test_multi_output_classification_partial_fit_parallelism():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
mor = MultiOutputClassifier(sgd_linear_clf, n_jobs=-1)
mor.partial_fit(X, y, classes)
est1 = mor.estimators_[0]
mor.partial_fit(X, y)
est2 = mor.estimators_[0]
# parallelism requires this to be the case for a sane implementation
assert_false(est1 is est2)
def test_multi_output_classification_partial_fit():
# test if multi_target initializes correctly with base estimator and fit
# assert predictions work as expected for predict
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
# train the multi_target_linear and also get the predictions.
half_index = X.shape[0] // 2
multi_target_linear.partial_fit(
X[:half_index], y[:half_index], classes=classes)
first_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), first_predictions.shape)
multi_target_linear.partial_fit(X[half_index:], y[half_index:])
second_predictions = multi_target_linear.predict(X)
assert_equal((n_samples, n_outputs), second_predictions.shape)
# train the linear classification with each column and assert that
# predictions are equal after first partial_fit and second partial_fit
for i in range(3):
# create a clone with the same state
sgd_linear_clf = clone(sgd_linear_clf)
sgd_linear_clf.partial_fit(
X[:half_index], y[:half_index, i], classes=classes[i])
assert_array_equal(sgd_linear_clf.predict(X), first_predictions[:, i])
sgd_linear_clf.partial_fit(X[half_index:], y[half_index:, i])
assert_array_equal(sgd_linear_clf.predict(X), second_predictions[:, i])
def test_multi_output_classification_partial_fit_no_first_classes_exception():
sgd_linear_clf = SGDClassifier(loss='log', random_state=1)
multi_target_linear = MultiOutputClassifier(sgd_linear_clf)
assert_raises_regex(ValueError, "classes must be passed on the first call "
"to partial_fit.",
multi_target_linear.partial_fit, X, y)
def test_multi_output_classification():
# test if multi_target initializes correctly with base estimator and fit
    # assert predictions work as expected for predict, predict_proba and score
forest = RandomForestClassifier(n_estimators=10, random_state=1)
multi_target_forest = MultiOutputClassifier(forest)
# train the multi_target_forest and also get the predictions.
multi_target_forest.fit(X, y)
predictions = multi_target_forest.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
predict_proba = multi_target_forest.predict_proba(X)
assert len(predict_proba) == n_outputs
for class_probabilities in predict_proba:
assert_equal((n_samples, n_classes), class_probabilities.shape)
assert_array_equal(np.argmax(np.dstack(predict_proba), axis=1),
predictions)
# train the forest with each column and assert that predictions are equal
for i in range(3):
forest_ = clone(forest) # create a clone with the same state
forest_.fit(X, y[:, i])
assert_equal(list(forest_.predict(X)), list(predictions[:, i]))
assert_array_equal(list(forest_.predict_proba(X)),
list(predict_proba[i]))
def test_multiclass_multioutput_estimator():
# test to check meta of meta estimators
svc = LinearSVC(random_state=0)
multi_class_svc = OneVsRestClassifier(svc)
multi_target_svc = MultiOutputClassifier(multi_class_svc)
multi_target_svc.fit(X, y)
predictions = multi_target_svc.predict(X)
assert_equal((n_samples, n_outputs), predictions.shape)
# train the forest with each column and assert that predictions are equal
for i in range(3):
multi_class_svc_ = clone(multi_class_svc) # create a clone
multi_class_svc_.fit(X, y[:, i])
assert_equal(list(multi_class_svc_.predict(X)),
list(predictions[:, i]))
def test_multiclass_multioutput_estimator_predict_proba():
seed = 542
# make test deterministic
rng = np.random.RandomState(seed)
# random features
X = rng.normal(size=(5, 5))
# random labels
y1 = np.array(['b', 'a', 'a', 'b', 'a']).reshape(5, 1) # 2 classes
y2 = np.array(['d', 'e', 'f', 'e', 'd']).reshape(5, 1) # 3 classes
Y = np.concatenate([y1, y2], axis=1)
clf = MultiOutputClassifier(LogisticRegression(random_state=seed))
clf.fit(X, Y)
y_result = clf.predict_proba(X)
y_actual = [np.array([[0.23481764, 0.76518236],
[0.67196072, 0.32803928],
[0.54681448, 0.45318552],
[0.34883923, 0.65116077],
[0.73687069, 0.26312931]]),
np.array([[0.5171785, 0.23878628, 0.24403522],
[0.22141451, 0.64102704, 0.13755846],
[0.16751315, 0.18256843, 0.64991843],
[0.27357372, 0.55201592, 0.17441036],
[0.65745193, 0.26062899, 0.08191907]])]
for i in range(len(y_actual)):
assert_almost_equal(y_result[i], y_actual[i])
def test_multi_output_classification_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6]]
yw = [[3, 2], [2, 3]]
w = np.asarray([2., 1.])
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf_w = MultiOutputClassifier(forest)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6]]
y = [[3, 2], [3, 2], [2, 3]]
forest = RandomForestClassifier(n_estimators=10, random_state=1)
clf = MultiOutputClassifier(forest)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_classification_partial_fit_sample_weights():
# weighted classifier
Xw = [[1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
yw = [[3, 2], [2, 3], [3, 2]]
w = np.asarray([2., 1., 1.])
sgd_linear_clf = SGDClassifier(random_state=1)
clf_w = MultiOutputClassifier(sgd_linear_clf)
clf_w.fit(Xw, yw, w)
# unweighted, but with repeated samples
X = [[1, 2, 3], [1, 2, 3], [4, 5, 6], [1.5, 2.5, 3.5]]
y = [[3, 2], [3, 2], [2, 3], [3, 2]]
sgd_linear_clf = SGDClassifier(random_state=1)
clf = MultiOutputClassifier(sgd_linear_clf)
clf.fit(X, y)
X_test = [[1.5, 2.5, 3.5]]
assert_array_almost_equal(clf.predict(X_test), clf_w.predict(X_test))
def test_multi_output_exceptions():
# NotFittedError when fit is not done but score, predict and
# and predict_proba are called
moc = MultiOutputClassifier(LinearSVC(random_state=0))
assert_raises(NotFittedError, moc.predict, y)
assert_raises(NotFittedError, moc.predict_proba, y)
assert_raises(NotFittedError, moc.score, X, y)
# ValueError when number of outputs is different
# for fit and score
y_new = np.column_stack((y1, y2))
moc.fit(X, y)
assert_raises(ValueError, moc.score, X, y_new)
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                                  err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| mit |
carrillo/scikit-learn | sklearn/datasets/svmlight_format.py | 79 | 15976 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
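# A minimal illustration of the line layout handled by this module (the
# concrete numbers below are made up for demonstration only):
#
#     <label> [qid:<int>] <index1>:<value1> <index2>:<value2> ... # comment
#     1 qid:3 1:0.43 3:0.12 9284:0.2
#
# Zero-valued features are simply omitted, which is what makes the format
# suitable for sparse data.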
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features, hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
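# A hedged usage sketch (the file names and the reuse of X_train.shape[1] as
# n_features are illustrative assumptions, not part of this module):
#
#     X_train, y_train = load_svmlight_file("train.svm")
#     X_test, y_test = load_svmlight_file("test.svm",
#                                         n_features=X_train.shape[1])
#
# Passing the training matrix's column count keeps the two matrices aligned,
# as the Notes section of load_svmlight_files below also stresses.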
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
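    Examples
    --------
    An illustrative sketch; the file names here are placeholders::
        from sklearn.datasets import load_svmlight_files
        X_train, y_train, X_test, y_test = load_svmlight_files(
            ("svmlight_train.txt", "svmlight_test.txt"))
        assert X_train.shape[1] == X_test.shape[1]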
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
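    # Heuristic for zero_based="auto": when the smallest column index seen in
    # every file is strictly positive, the data is assumed to be one-based and
    # all column indices are shifted down by one below.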
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
    This format is a text-based format, with one sample per line. It does
    not store zero-valued features and is hence suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
        objects of integer or float for multilabel classification.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
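    Examples
    --------
    An illustrative sketch; the output path is a placeholder::
        import numpy as np
        from sklearn.datasets import dump_svmlight_file
        X = np.array([[0., 1.5], [2., 0.]])
        y = np.array([0, 1])
        dump_svmlight_file(X, y, "mysvmlightfile", zero_based=True)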
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
wanglei828/apollo | modules/tools/plot_planning/angular_velocity.py | 1 | 4080 | #!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math
from record_reader import RecordItemReader
class AngularVelocity:
def __init__(self):
self.timestamp_list = []
self.angular_velocity_list = []
self.corrected_angular_velocity_list = []
self.last_corrected_angular_velocity = None
self.last_timestamp = None
def add(self, location_est):
timestamp_sec = location_est.header.timestamp_sec
angular_velocity = location_est.pose.angular_velocity.z
if self.last_corrected_angular_velocity is not None:
corrected = self.correct_angular_velocity(
angular_velocity, timestamp_sec)
else:
corrected = angular_velocity
self.timestamp_list.append(timestamp_sec)
self.angular_velocity_list.append(angular_velocity)
self.corrected_angular_velocity_list.append(corrected)
self.last_corrected_angular_velocity = corrected
self.last_timestamp = timestamp_sec
def correct_angular_velocity(self, angular_velocity, timestamp_sec):
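        # Heuristic spike suppression: if the new reading deviates from the
        # previous corrected value by more than 25%, treat it as an outlier
        # and damp it by a factor of two.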
if self.last_corrected_angular_velocity is None:
return angular_velocity
delta = abs(angular_velocity - self.last_corrected_angular_velocity)\
/ abs(self.last_corrected_angular_velocity)
if delta > 0.25:
corrected = angular_velocity / 2.0
return corrected
else:
return angular_velocity
def get_anglular_velocity_list(self):
return self.angular_velocity_list
def get_corrected_anglular_velocity_list(self):
return self.corrected_angular_velocity_list
def get_timestamp_list(self):
return self.timestamp_list
def get_latest_angular_velocity(self):
if len(self.angular_velocity_list) == 0:
return None
else:
return self.angular_velocity_list[-1]
def get_latest_corrected_angular_velocity(self):
if len(self.corrected_angular_velocity_list) == 0:
return None
else:
return self.corrected_angular_velocity_list[-1]
def get_latest_timestamp(self):
if len(self.timestamp_list) == 0:
return None
else:
return self.timestamp_list[-1]
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
folders = sys.argv[1:]
fig, ax = plt.subplots()
colors = ["g", "b", "r", "m", "y"]
markers = ["o", "o", "o", "o"]
for i in range(len(folders)):
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
processor = AngularVelocity()
for data in reader.read(["/apollo/localization/pose"]):
processor.add(data["pose"])
data_x = processor.get_timestamp_list()
data_y = processor.get_corrected_anglular_velocity_list()
ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.4)
data_y = processor.get_anglular_velocity_list()
ax.scatter(data_x, data_y, c='k', marker="+", alpha=0.8)
plt.show()
| apache-2.0 |
effigies/mne-python | examples/realtime/rt_feedback_server.py | 2 | 4956 | """
==============================================
Real-time feedback for decoding :: Server Side
==============================================
This example demonstrates how to set up a real-time feedback
mechanism using StimServer and StimClient.
The idea here is to display future stimuli for the class which
is predicted less accurately. This allows on-demand adaptation
of the stimuli depending on the needs of the classifier.
To run this example, open ipython in two separate terminals.
In the first, run rt_feedback_server.py and then wait for the
message
RtServer: Start
Once that appears, run rt_feedback_client.py in the other terminal
and the feedback script should start.
All brain responses are simulated from a fiff file to make it easy
to test. However, it should be possible to adapt this script
for a real experiment.
"""
print(__doc__)
# Author: Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
import time
import mne
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne.realtime import StimServer
from mne.realtime import MockRtClient
from mne.decoding import ConcatenateChannels, FilterEstimator
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# Load fiff file to simulate data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(raw_fname, preload=True)
# Instantiating stimulation server
# The with statement is necessary to ensure a clean exit
with StimServer('localhost', port=4218) as stim_server:
# The channels to be used while decoding
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
rt_client = MockRtClient(raw)
# Constructing the pipeline for classification
filt = FilterEstimator(raw.info, 1, 40)
scaler = preprocessing.StandardScaler()
concatenator = ConcatenateChannels()
clf = SVC(C=1, kernel='linear')
concat_classifier = Pipeline([('filter', filt), ('concat', concatenator),
('scaler', scaler), ('svm', clf)])
stim_server.start(verbose=True)
# Just some initially decided events to be simulated
# Rest will decided on the fly
ev_list = [4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4]
score_c1, score_c2, score_x = [], [], []
for ii in range(50):
# Tell the stim_client about the next stimuli
stim_server.add_trigger(ev_list[ii])
# Collecting data
if ii == 0:
X = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')[None, ...]
y = ev_list[ii]
else:
X_temp = rt_client.get_event_data(event_id=ev_list[ii], tmin=-0.2,
tmax=0.5, picks=picks,
stim_channel='STI 014')
X_temp = X_temp[np.newaxis, ...]
X = np.concatenate((X, X_temp), axis=0)
time.sleep(1) # simulating the isi
y = np.append(y, ev_list[ii])
# Start decoding after collecting sufficient data
if ii >= 10:
# Now start doing rtfeedback
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=7)
y_pred = concat_classifier.fit(X_train, y_train).predict(X_test)
cm = confusion_matrix(y_test, y_pred)
score_c1.append(float(cm[0, 0]) / sum(cm, 1)[0] * 100)
score_c2.append(float(cm[1, 1]) / sum(cm, 1)[1] * 100)
# do something if one class is decoded better than the other
if score_c1[-1] < score_c2[-1]:
print("We decoded class RV better than class LV")
ev_list.append(3) # adding more LV to future simulated data
else:
print("We decoded class LV better than class RV")
ev_list.append(4) # adding more RV to future simulated data
# Clear the figure
plt.clf()
# The x-axis for the plot
score_x.append(ii)
# Now plot the accuracy
plt.plot(score_x[-5:], score_c1[-5:])
plt.hold(True)
plt.plot(score_x[-5:], score_c2[-5:])
plt.xlabel('Trials')
plt.ylabel('Classification score (% correct)')
plt.title('Real-time feedback')
plt.ylim([0, 100])
plt.xticks(score_x[-5:])
plt.legend(('LV', 'RV'), loc='upper left')
plt.show()
| bsd-3-clause |
airanmehr/bio | Scripts/TimeSeriesPaper/Plot/ThetaH.py | 1 | 2742 | '''
Copyleft Dec 13, 2015 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import Utils.Estimate as e
from Utils import Simulation
reload(e)
import numpy as np; np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=50;pd.options.display.expand_frame_repr=False
import pylab as plt;
import os; home=os.path.expanduser('~') +'/'
plt.figure(figsize=(12,10))
s=0.0
sim = Simulation.Simulation.load(s=s, theta=200, L=50000, experimentID=0)
y=pd.DataFrame(np.array([[Xrt.dot(Xrt) for Xrt in Xt.T] for Xt in sim.X0t]),index=sim.getGenerationTimes())
n=200;bins=200;k=bins;alpha=(n-1.)/(2*n)
a=e.Estimate.getEstimate(sim.X0t, n=n, method='faywu',bins=bins);a.index=sim.getGenerationTimes()
y.mean(1).plot(ax=plt.gca(),label=r'||x||2');((alpha)*a).mean(1).plot(ax=plt.gca())
n=200;bins=10;k=bins;alpha=(n-1.)/(2*n)
a=e.Estimate.getEstimate(sim.X0t, n=n, method='faywu',bins=bins);a.index=sim.getGenerationTimes();((alpha)*a).mean(1).plot(ax=plt.gca());
sim.getAverageHAF().mean(1).plot(ax=plt.gca())
plt.grid();plt.legend([r'$\|x\|^2$',r'$\theta_H$ (using 100-bin-histogram)',r'$\theta_H$ (using 10-bin-histogram)',r'$\frac{1}{n}$AverageHAF'],loc='best')
plt.xlabel('Generations')
plt.title('s={}'.format(s))
# plt.savefig(Simulation.paperFiguresPath+'thetaHs{:02.0f}.png'.format(s*100))
plt.show()
print e.Estimate.getAllEstimates(sim.H0.values)
bins=10
print 'bins={} watterson={} pi={} td={} faywu={}'.format(bins,e.Estimate.getEstimate(sim.X0, n=n, method='watterson',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='pi',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='tajimaD',bins=bins), e.Estimate.getEstimate(sim.X0, n=n, method='faywu',bins=bins))
bins=20
print 'bins={} watterson={} pi={} td={} faywu={}'.format(bins,e.Estimate.getEstimate(sim.X0, n=n, method='watterson',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='pi',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='tajimaD',bins=bins), e.Estimate.getEstimate(sim.X0, n=n, method='faywu',bins=bins))
bins=-1
print 'bins={} watterson={} pi={} td={} faywu={}'.format(bins,e.Estimate.getEstimate(sim.X0, n=n, method='watterson',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='pi',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='tajimaD',bins=bins), e.Estimate.getEstimate(sim.X0, n=n, method='faywu',bins=bins))
bins=200
print 'bins={} watterson={} pi={} td={} faywu={}'.format(bins,e.Estimate.getEstimate(sim.X0, n=n, method='watterson',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='pi',bins=bins),e.Estimate.getEstimate(sim.X0, n=n, method='tajimaD',bins=bins), e.Estimate.getEstimate(sim.X0, n=n, method='faywu',bins=bins)) | mit |
mtp401/airflow | setup.py | 1 | 5424 | from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import os
import sys
# Kept manually in sync with airflow.__version__
version = '1.7.0'
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
async = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
celery = [
'celery>=3.1.17',
'flower>=0.7.3'
]
crypto = ['cryptography>=0.9.3']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker-py>=1.6.0']
druid = ['pydruid>=0.2.1']
gcloud = [
'gcloud>=0.11.0',
]
gcp_api = [
'httplib2',
'google-api-python-client<=1.4.2',
'oauth2client>=1.5.2, <2.0.0',
'PyOpenSSL',
]
hdfs = ['snakebite>=2.7.8']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
hive = [
'hive-thrift-py>=0.0.1',
'pyhive>=0.1.3',
'impyla>=0.13.3',
'unicodecsv>=0.14.1'
]
jdbc = ['jaydebeapi>=0.2.0']
mssql = ['pymssql>=2.1.1', 'unicodecsv>=0.14.1']
mysql = ['mysqlclient>=1.3.6']
rabbitmq = ['librabbitmq>=1.6.1']
oracle = ['cx_Oracle>=5.1.2']
postgres = ['psycopg2>=2.6']
s3 = [
'boto>=2.36.0',
'filechunkio>=1.6',
]
samba = ['pysmbclient>=0.1.3']
slack = ['slackclient>=1.0.0']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
ldap = ['ldap3>=0.9.9.1']
kerberos = ['pykerberos>=1.1.8',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
qds = ['qds-sdk>=1.9.0']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica
devel = ['lxml>=3.3.4', 'nose', 'nose-parameterized', 'mock']
devel_minreq = devel + mysql + doc + password + s3
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = devel + all_dbs + doc + samba + s3 + slack + crypto + oracle + docker
setup(
name='airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(),
package_data={'': ['airflow/alembic.ini']},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'babel>=1.3, <2.0',
'chartkick>=0.4.2, < 0.5',
'croniter>=0.3.8, <0.4',
'dill>=0.2.2, <0.3',
'python-daemon>=2.1.1, <2.2',
'flask>=0.10.1, <0.11',
'flask-admin>=1.4.0, <2.0.0',
'flask-cache>=0.13.1, <0.14',
'flask-login==0.2.11',
'future>=0.15.0, <0.16',
'funcsigs>=0.4, <1',
'gunicorn>=19.3.0, <19.4.0', # 19.4.? seemed to have issues
'jinja2>=2.7.3, <3.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.15.2, <1.0.0',
'pygments>=2.0.1, <3.0',
'python-dateutil>=2.3, <3',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=0.9.8',
'thrift>=0.9.2, <0.10',
'Flask-WTF==0.12'
],
extras_require={
'all': devel_all,
'all_dbs': all_dbs,
'async': async,
'celery': celery,
'crypto': crypto,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'gcloud': gcloud,
'gcp_api': gcp_api,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'postgres': postgres,
'rabbitmq': rabbitmq,
's3': s3,
'samba': samba,
'slack': slack,
'statsd': statsd,
'vertica': vertica,
'ldap': ldap,
'webhdfs': webhdfs,
'kerberos': kerberos,
'password': password,
'github_enterprise': github_enterprise,
'qds': qds
},
classifiers={
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
},
author='Maxime Beauchemin',
author_email='[email protected]',
url='https://github.com/airbnb/airflow',
download_url=(
'https://github.com/airbnb/airflow/tarball/' + version),
cmdclass={'test': Tox,
'extra_clean': CleanCommand,
},
)
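# The extras declared above can be combined at install time, for example:
#   pip install "airflow[postgres,s3]"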
| apache-2.0 |
freedomflyer/test | examples/delay.py | 1 | 2489 | import sys
import matplotlib
sys.path.append('..')
from src.sim import Sim
from src import node
from src import link
from src import packet
from networks.network import Network
import random
class Generator(object):
def __init__(self,node,destination,load,duration):
self.node = node
self.load = load
self.duration = duration
self.start = 0
self.ident = 1
self.destination = destination
def handle(self,event):
# quit if done
now = Sim.scheduler.current_time()
if (now - self.start) > self.duration:
return
# generate a packet
self.ident += 1
p = packet.Packet(destination_address=self.destination,ident=self.ident,protocol='delay',length=1000)
Sim.scheduler.add(delay=0, event=p, handler=self.node.send_packet)
# schedule the next time we should generate a packet
Sim.scheduler.add(delay=random.expovariate(self.load), event='generate', handler=self.handle)
class DelayHandler(object):
def __init__(self):
self.iteration = 0
print "It\tCurrent Time\tPacket Ident\tCreated At\tElapsed Time\tTransm Delay\tProp Delay\tQueue Delay"
def receive_packet(self, packet):
self.iteration += 1
print "%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f" % \
(self.iteration, Sim.scheduler.current_time(), packet.ident, packet.created, \
(Sim.scheduler.current_time() - packet.created), packet.transmission_delay, \
packet.propagation_delay, packet.queueing_delay)
if __name__ == '__main__':
# parameters
Sim.scheduler.reset()
# setup network
net = Network('../networks/one-hop.txt')
# setup routes
n1 = net.get_node('n1')
n2 = net.get_node('n2')
n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])
# setup app
d = DelayHandler()
net.nodes['n2'].add_protocol(protocol="delay",handler=d)
# setup packet generator
destination = n2.get_address('n1')
max_rate = 1000000/(1000*8)
load = .8 *max_rate
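    # max_rate assumes a 1 Mbps link and 1000-byte (8000-bit) packets, giving
    # 125 packets/s; the generator is then driven at 80% of that capacity.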
g = Generator(node=n1,destination=destination,load=load,duration=10)
Sim.scheduler.add(delay=0, event='generate', handler=g.handle)
# run the simulation
Sim.scheduler.run()
#141 9.989235 142.000000 9.980235 0.009000 0.008000 0.001000 0.000000
#962 10.009724 963.000000 9.995443 0.014281 0.008000 0.001000 0.005281 | gpl-2.0 |
cycomachead/info290 | lab3/q7.py | 1 | 2502 | from __future__ import print_function
from sklearn import cluster, metrics
from numpy import recfromcsv
import numpy as np
#from file_utils import reviewers
import csv
### utility functions
def na_rm(data):
data = data[~np.isnan(data).any(axis=1)]
return data[~np.isinf(data).any(axis=1)]
def returnNaNs(data):
return [i for i in data if np.isnan(i)]
D = recfromcsv("../yelp_reviewers.txt", delimiter='|')
D7 = np.array(D[['q8', 'q9', 'q10', 'q11', 'q12', 'q13',
'q18_group2', 'q18_group3', 'q18_group5', 'q18_group6',
'q18_group7', 'q18_group11', 'q18_group13', 'q18_group14',
'q18_group15', 'q18_group16_a', 'q18_group16_b',
'q18_group16_c', 'q18_group16_d', 'q18_group16_e',
'q18_group16_f', 'q18_group16_g', 'q18_group16_h']].tolist())
def get_clustering(n, data):
clusterer = cluster.KMeans(n_clusters = n)
clustering = clusterer.fit(data)
return clustering
def pctNaN(col):
    # Use true division so the NaN fraction is not truncated under Python 2's
    # integer division semantics.
    return float(len(returnNaNs(col))) / len(col)
def preprocess(data):
i = 0
realCol = 0
while i < data.shape[1]:
row = data[:, i]
pct = pctNaN(row)
if pct > 0.50:
# The last 1 specifies to delete a column not a row
data = np.delete(data, i, 1)
else:
i += 1
realCol += 1
return na_rm(data)
def question7b(data):
with open('q7b.feature', 'w+') as f:
file_writer = csv.writer(f)
file_writer.writerow(['num_clusters', 'sum_win_var_clust'])
for i in range(2, 9):
try:
clustering = get_clustering(i, data)
file_writer.writerow([i, clustering.inertia_])
except Exception as e:
print(str(i) + " clusters had a problem:")
print(e)
def question7a(data):
with open('q7a.feature', 'w+') as f:
file_writer = csv.writer(f)
        file_writer.writerow(['num_clusters', 'silhouette_coeff'])
        # Containers for the fitted clusterers and their silhouette scores.
        cluster_fits = {}
        silhouettes = {}
for i in range(2, 9):
try:
clustering = get_clustering(i, data)
cluster_fits[i] = clustering
m = metrics.silhouette_score(data, clustering.labels_, metric='euclidean', sample_size = 10000)
silhouettes[i] = m
file_writer.writerow([i, m])
except Exception as e:
print(str(i) + " clusters had a problem:")
print(e)
D7 = preprocess(D7)
question7a(D7)
question7b(D7)
| bsd-2-clause |
NINAnor/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 1 | 10160 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except:
hasMatplotlib = False
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymmetricalDifference import SymmetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteHoles import DeleteHoles
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from SelectByAttributeSum import SelectByAttributeSum
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
from FieldsMapper import FieldsMapper
from Datasources2Vrt import Datasources2Vrt
from CheckValidity import CheckValidity
from OrientedMinimumBoundingBox import OrientedMinimumBoundingBox
from Smooth import Smooth
from ReverseLineDirection import ReverseLineDirection
from ExecuteSQL import ExecuteSQL
pluginPath = os.path.normpath(os.path.join(
os.path.split(os.path.dirname(__file__))[0], os.pardir))
class QGISAlgorithmProvider(AlgorithmProvider):
def __init__(self):
AlgorithmProvider.__init__(self)
self._icon = QIcon(os.path.join(pluginPath, 'images', 'qgis.png'))
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(), DeleteHoles(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymmetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
FieldsMapper(), SelectByAttributeSum(), Datasources2Vrt(),
CheckValidity(), OrientedMinimumBoundingBox(), Smooth(),
ReverseLineDirection(), ExecuteSQL()
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return self.tr('QGIS geoalgorithms')
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
etkirsch/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
ycasg/PyNLO | src/validation/Old and Partial Tests/Dudley_SSFM.py | 2 | 2731 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 15 15:39:12 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: dim1
"""
import numpy as np
import matplotlib.pyplot as plt
from pynlo.interactions.FourWaveMixing import SSFM
from pynlo.media.fibers import fiber
from pynlo.light.DerivedPulses import SechPulse
#plt.close('all')
dz = 1e-3
steps = 100
range1 = np.arange(steps)
centerwl = 835.0
fiber_length = 0.04
pump_power = 1.0e4
pump_pulse_length = 28.4e-3
npoints = 2**13
init = SechPulse(pump_power, pump_pulse_length, centerwl, time_window = 10.0,
GDD = 0, TOD = 0.0, NPTS = npoints, frep_MHz = 100, power_is_avg = False)
fiber1 = fiber.FiberInstance()
fiber1.load_from_db( fiber_length, 'dudley')
evol = SSFM.SSFM(dz = 1e-6, local_error = 0.001, USE_SIMPLE_RAMAN = True)
y = np.zeros(steps)
AW = np.zeros((init.NPTS, steps))
AT = np.copy(AW)
y, AW, AT, pulse1 = evol.propagate(pulse_in = init, fiber = fiber1,
n_steps = steps)
wl = init.wl_nm
loWL = 400
hiWL = 1400
iis = np.logical_and(wl>loWL,wl<hiWL)
iisT = np.logical_and(init.T_ps>-1,init.T_ps<5)
xW = wl[iis]
xT = init.T_ps[iisT]
zW_in = np.transpose(AW)[:,iis]
zT_in = np.transpose(AT)[:,iisT]
zW = 10*np.log10(np.abs(zW_in)**2)
zT = 10*np.log10(np.abs(zT_in)**2)
mlIW = np.max(zW)
mlIT = np.max(zT)
D = fiber1.Beta2_to_D(init)
beta = fiber1.Beta2(init)
#
#plt.figure()
#plt.subplot(121)
#plt.plot(wl,D,'x')
#plt.xlim(400,1600)
#plt.ylim(-400,300)
#plt.xlabel('Wavelength (nm)')
#plt.ylabel('D (ps/nm/km)')
#plt.subplot(122)
#plt.plot(wl,beta*1000,'x')
#plt.xlim(400,1600)
#plt.ylim(-350,200)
#plt.xlabel('Wavelength (nm)')
#plt.ylabel(r'$\beta_2$ (ps$^2$/km)')
plt.figure()
plt.subplot(121)
plt.pcolormesh(xW, y, zW, vmin = mlIW - 40.0, vmax = mlIW)
plt.autoscale(tight=True)
plt.xlim([loWL, hiWL])
plt.xlabel('Wavelength (nm)')
plt.ylabel('Distance (m)')
plt.subplot(122)
plt.pcolormesh(xT, y, zT, vmin = mlIT - 40.0, vmax = mlIT)
plt.autoscale(tight=True)
plt.xlabel('Delay (ps)')
plt.ylabel('Distance (m)')
plt.show() | gpl-3.0 |
annayqho/TheCannon | code/lamost/mass_age/cn/run_full_test.py | 1 | 4784 | """
Run the test step on all the LAMOST DR2 objects.
You have to run this script on aida42082
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '/home/annaho/TheCannon')
#sys.path.insert(0, '/home/annaho')
#from lamost import load_spectra
#import dataset
#import model
from TheCannon import dataset
from TheCannon import model
#from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
from pull_data import find_colors, apply_mask
SPEC_DIR = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels"
COL_DIR = "/home/annaho/TheCannon/data/lamost"
MODEL_DIR = "."
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def test_step(date):
wl = np.load("%s/wl_cols.npz" %MODEL_DIR)['arr_0']
test_ID = np.load("%s/output/%s_ids.npz" %(SPEC_DIR, date))['arr_0']
print(str(len(test_ID)) + " objects")
test_flux_temp = np.load("%s/output/%s_norm.npz" %(SPEC_DIR,date))['arr_0']
test_ivar_temp = np.load("%s/output/%s_norm.npz" %(SPEC_DIR,date))['arr_1']
# Mask
mask = np.load("mask.npz")['arr_0']
test_ivar_masked = apply_mask(wl[0:3626], test_ivar_temp, mask)
# Append colors
col = np.load(COL_DIR + "/" + date + "_col.npz")['arr_0']
col_ivar = np.load(COL_DIR + "/" + date + "_col_ivar.npz")['arr_0']
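    # Replace NaN/inf photometry with a placeholder flux and zero inverse
    # variance so that bad points carry no weight without changing array shapes.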
bad_flux = np.logical_or(np.isnan(col), col==np.inf)
col[bad_flux] = 1.0
col_ivar[bad_flux] = 0.0
bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
col_ivar[bad_ivar] = 0.0
test_flux = np.hstack((test_flux_temp, col.T))
test_ivar = np.hstack((test_ivar_temp, col_ivar.T))
lamost_label = np.load("%s/output/%s_tr_label.npz" %(SPEC_DIR,date))['arr_0']
apogee_label = np.load("./ref_label.npz")['arr_0']
ds = dataset.Dataset(wl, test_ID, test_flux[0:2,:], test_ivar[0:2,:],
lamost_label, test_ID, test_flux, test_ivar)
#np.savez(COL_DIR + "/%s_test_flux.npz" %date, ds.test_flux)
#np.savez(COL_DIR + "/%s_test_ivar.npz" %date, ds.test_ivar)
np.savez(COL_DIR + "/%s_test_snr.npz" %date, ds.test_SNR)
np.savez(COL_DIR + "/%s_test_id.npz" %date, ds.test_ID)
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]', '[C/M]', '[N/M]', '[\\alpha/Fe]', 'A_k'])
# m = model.CannonModel(2)
# m.coeffs = np.load("./coeffs.npz")['arr_0']
# m.scatters = np.load("./scatters.npz")['arr_0']
# m.chisqs = np.load("./chisqs.npz")['arr_0']
# m.pivots = np.load("./pivots.npz")['arr_0']
#
# nlabels = len(m.pivots)
# nobj = len(test_ID)
#
# nguesses = 7
# choose = np.random.randint(0,nobj,size=nguesses)
# print(apogee_label.shape)
# print(choose.shape)
# print(m.pivots.shape)
# starting_guesses = apogee_label[choose]-m.pivots
#
# labels = np.zeros((nguesses, nobj, nlabels))
# chisq = np.zeros((nguesses, nobj))
# errs = np.zeros(labels.shape)
#
# for ii,guess in enumerate(starting_guesses):
# a,b,c = test_step_iteration(ds,m,starting_guesses[ii])
# labels[ii,:] = a
# chisq[ii,:] = b
# errs[ii,:] = c
#
# np.savez(COL_DIR + "/%s_cannon_label_guesses.npz" %date, labels)
# np.savez(COL_DIR + "/%s_cannon_chisq_guesses.npz" %date, labels)
#
# choose = np.argmin(chisq, axis=0)
# best_chisq = np.min(chisq, axis=0)
# best_labels = np.zeros((nobj, nlabels))
# best_errs = np.zeros(best_labels.shape)
# for jj,val in enumerate(choose):
# best_labels[jj,:] = labels[:,jj,:][val]
# best_errs[jj,:] = errs[:,jj,:][val]
#
# np.savez(COL_DIR + "/%s_all_cannon_labels.npz" %date, best_labels)
# np.savez(COL_DIR + "/%s_cannon_label_chisq.npz" %date, best_chisq)
# np.savez(COL_DIR + "/%s_cannon_label_errs.npz" %date, best_errs)
#
# ds.test_label_vals = best_labels
# #ds.diagnostics_survey_labels(figname="%s_survey_labels_triangle.png" %date)
# ds.test_label_vals = best_labels[:,0:3]
# ds.set_label_names(['T_{eff}', '\log g', '[M/H]'])
# ds.diagnostics_1to1(figname = COL_DIR + "/%s_1to1_test_label" %date)
#
if __name__=="__main__":
dates = os.listdir("/home/share/LAMOST/DR2/DR2_release")
dates = np.array(dates)
dates = np.delete(dates, np.where(dates=='.directory')[0][0])
dates = np.delete(dates, np.where(dates=='all_folders.list')[0][0])
dates = np.delete(dates, np.where(dates=='dr2.lis')[0][0])
for date in dates:
print("running %s" %date)
#if glob.glob(COL_DIR + "/%s_all_cannon_labels.npz" %date):
# print("already done")
#else:
test_step(date)
| mit |
pkruskal/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
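    Examples
    --------
    A minimal hand-constructed sketch; two identical sets of biclusters
    score 1.0::
        import numpy as np
        rows = np.array([[True, False, False]])
        cols = np.array([[True, True, False]])
        consensus_score((rows, cols), (rows, cols))  # returns 1.0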
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| bsd-3-clause |
mjescobar/RF_Estimation | Clustering/helpers/clustering_mj/processAll.py | 2 | 2140 | #!/usr/bin/env python
import numpy as np
import os
import matplotlib.pyplot as plt
import scipy.io
import scipy
import sys
inmat = scipy.io.loadmat('temp_curves_150_50.mat')
tempCurves = inmat['tc']
tempCurvesSpl = inmat['tci']
idx = inmat['idx']
xc = inmat['xc']
print "Shape of tempCurves: ", np.shape(tempCurves)
print "Shape of tempCurvesSpl: ", np.shape(tempCurvesSpl)
print "Shape of idx: ", np.shape(idx)
ntime, ncells = np.shape(tempCurves)
ntimeSpl, ncells = np.shape(tempCurvesSpl)
print "nTime: ", ntime, " - nCells: ", ncells
nclusters = np.max(idx)
print "Number of clusters: ", nclusters
cluster_colors = ['blue', 'red', 'green', 'orange', 'black']
meanCurves = np.zeros( (nclusters,ntimeSpl) )
meanCount = np.zeros( (nclusters,1) )
# Computing mean values
for i in range(ncells):
if( idx[i] == 1 ):
meanCurves[0,:] += tempCurvesSpl[:,i]
meanCount[0] += 1
if( idx[i] == 2 ):
meanCurves[1,:] += tempCurvesSpl[:,i]
meanCount[1] += 1
if( idx[i] == 3 ):
meanCurves[2,:] += tempCurvesSpl[:,i]
meanCount[2] += 1
if( idx[i] == 4 ):
meanCurves[3,:] += tempCurvesSpl[:,i]
meanCount[3] += 1
if( idx[i] == 5 ):
meanCurves[4,:] += tempCurvesSpl[:,i]
meanCount[4] += 1
print meanCount[0], "-", cluster_colors[0]
print meanCount[1], "-", cluster_colors[1]
print meanCount[2], "-", cluster_colors[2]
print meanCount[3], "-", cluster_colors[3]
print meanCount[4], "-", cluster_colors[4]
for i in range(nclusters):
meanCurves[i,:] /= meanCount[i]
# Plotting figures
plt.figure()
for i in range(ncells):
plt.plot(tempCurves[:,i], cluster_colors[idx[i]-1], alpha=0.2)
plt.grid(True)
plt.figure()
for i in range(ncells):
plt.plot(xc, tempCurvesSpl[:,i], cluster_colors[idx[i]-1], linewidth=0.5, alpha=0.15)
for i in range(nclusters):
plt.plot(xc, meanCurves[i,:], cluster_colors[i], linewidth=4, label= "n = %d cells" % meanCount[i])
plt.grid(True)
plt.xlabel('Time before spike [ms]')
plt.ylim(-0.4,0.4)
plt.legend(loc=2)
plt.savefig('Clusters_50uM-150uM.pdf', format='pdf', bbox_inches='tight')
plt.show()
| gpl-2.0 |
dakoner/keras-molecules | sample_latent.py | 4 | 4038 | from __future__ import print_function
import argparse
import os, sys
import h5py
import numpy as np
from molecules.model import MoleculeVAE
from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, \
decode_smiles_from_indexes, load_dataset
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from pylab import figure, axes, scatter, title, show
from rdkit import Chem
from rdkit.Chem import Draw
from keras.models import Sequential, Model, load_model
LATENT_DIM = 292
PCA_COMPONENTS = 50
TSNE_LEARNING_RATE = 750.0
TSNE_ITERATIONS = 1000
TSNE_COMPONENTS = 2
TSNE_PERPLEXITY = 30.0
def get_arguments():
parser = argparse.ArgumentParser(description='Molecular autoencoder network')
parser.add_argument('data', type=str, help='HDF5 file to read input data from.')
parser.add_argument('model', type=str, help='Trained Keras model to use.')
parser.add_argument('--save_h5', type=str, help='Name of a file to write HDF5 output to.')
parser.add_argument('--latent_dim', type=int, metavar='N', default=LATENT_DIM,
help='Dimensionality of the latent representation.')
parser.add_argument('--tsne_lr', metavar='LR', type=float, default=TSNE_LEARNING_RATE,
help='Learning to use for t-SNE.')
parser.add_argument('--tsne_components', metavar='N', type=int, default=TSNE_COMPONENTS,
help='Number of components to use for t-SNE.')
parser.add_argument('--tsne_perplexity', metavar='P', type=float, default=TSNE_PERPLEXITY)
parser.add_argument('--tsne_iterations', metavar='N', type=int, default=TSNE_ITERATIONS)
parser.add_argument('--visualize', dest='visualize', action='store_true',
help='Fit manifold and render a visualization. If this flag is not used, the sampled data' +
' will simply be returned with no further processing.')
parser.add_argument('--skip-pca', dest='use_pca', action='store_false',
help='Skip PCA preprocessing of data to feed into t-SNE.')
parser.add_argument('--pca_components', metavar='N', type=int, default=PCA_COMPONENTS,
help='Number of components to use for PCA.')
parser.set_defaults(use_pca = True)
parser.set_defaults(visualize = False)
return parser.parse_args()
def visualize_latent_rep(args, model, x_latent):
print("pca_on=%r pca_comp=%d tsne_comp=%d tsne_perplexity=%f tsne_lr=%f" % (
args.use_pca,
args.pca_components,
args.tsne_components,
args.tsne_perplexity,
args.tsne_lr
))
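    # Optionally compress the latent vectors with PCA before t-SNE; reducing
    # the dimensionality first is a common way to speed up t-SNE and suppress
    # noise in high-dimensional inputs.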
if args.use_pca:
pca = PCA(n_components = args.pca_components)
x_latent = pca.fit_transform(x_latent)
figure(figsize=(6, 6))
scatter(x_latent[:, 0], x_latent[:, 1], marker='.')
show()
tsne = TSNE(n_components = args.tsne_components,
perplexity = args.tsne_perplexity,
learning_rate = args.tsne_lr,
n_iter = args.tsne_iterations,
verbose = 4)
x_latent_proj = tsne.fit_transform(x_latent)
del x_latent
figure(figsize=(6, 6))
scatter(x_latent_proj[:, 0], x_latent_proj[:, 1], marker='.')
show()
def main():
args = get_arguments()
model = MoleculeVAE()
data, data_test, charset = load_dataset(args.data)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = args.latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
x_latent = model.encoder.predict(data)
if not args.visualize:
if not args.save_h5:
np.savetxt(sys.stdout, x_latent, delimiter = '\t')
else:
h5f = h5py.File(args.save_h5, 'w')
h5f.create_dataset('charset', data = charset)
h5f.create_dataset('latent_vectors', data = x_latent)
h5f.close()
else:
visualize_latent_rep(args, model, x_latent)
if __name__ == '__main__':
main()
| mit |
oemof/examples | oemof_examples/oemof.solph/v0.4.x/storage_balanced_unbalanced/storage.py | 1 | 5395 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that shows the parameter `balanced` of `GenericStorage`.
Installation requirements
-------------------------
This example requires oemof.solph v0.4.x. Install by:
pip install 'oemof.solph>=0.4,<0.5'
Optional to see the plots:
pip install matplotlib
Copyright / Licence Info
------------------------
This file is part of project oemof (github.com/oemof/oemof). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location oemof/tests/test_scripts/test_solph/
test_generic_offsettransformer/test_generic_offsettransformer.py
SPDX-License-Identifier: GPL-3.0-or-later
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
from oemof import solph
try:
from matplotlib import pyplot as plt
except ImportError:
print("Install matplotlib to see the plots.")
plt = None
DATA = [
{"name": "unbalanced", "initial_storage_level": 0.2, "balanced": False},
{
"name": "unbalanced_None",
"initial_storage_level": None,
"balanced": False,
},
{"name": "balanced", "initial_storage_level": 0.2, "balanced": True},
{"name": "balanced_None", "initial_storage_level": None, "balanced": True},
]
PARAMETER = {"el_price": 10, "sh_price": 5, "nominal_storage_capacity": 7}
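# The `balanced` flag controls whether the storage content at the end of the
# optimization horizon is coupled back to the initial storage level (True) or
# left free (False); the four cases below exercise both settings with and
# without an explicit initial_storage_level.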
def storage_example():
# read time series
timeseries = pd.read_csv(os.path.join(os.getcwd(), "storage_data.csv"))
# create an energy system
idx = pd.date_range("1/1/2017", periods=len(timeseries), freq="H")
es = solph.EnergySystem(timeindex=idx)
for data_set in DATA:
name = data_set["name"]
# power bus
bel = solph.Bus(label="bel_{0}".format(name))
es.add(bel)
es.add(
solph.Source(
label="source_el_{0}".format(name),
outputs={
bel: solph.Flow(variable_costs=PARAMETER["el_price"])
},
)
)
es.add(
solph.Source(
label="pv_el_{0}".format(name),
outputs={
bel: solph.Flow(fix=timeseries["pv_el"], nominal_value=1)
},
)
)
es.add(
solph.Sink(
label="demand_el_{0}".format(name),
inputs={
bel: solph.Flow(
fix=timeseries["demand_el"], nominal_value=1
)
},
)
)
es.add(
solph.Sink(
label="shunt_el_{0}".format(name),
inputs={bel: solph.Flow(variable_costs=PARAMETER["sh_price"])},
)
)
# Electric Storage
es.add(
solph.components.GenericStorage(
label="storage_elec_{0}".format(name),
nominal_storage_capacity=PARAMETER["nominal_storage_capacity"],
inputs={bel: solph.Flow()},
outputs={bel: solph.Flow()},
initial_storage_level=data_set["initial_storage_level"],
balanced=data_set["balanced"],
)
)
# create an optimization problem and solve it
om = solph.Model(es)
# solve model
om.solve(solver="cbc")
# create result object
results = solph.processing.results(om)
flows = [x for x in results if x[1] is not None]
components = [x for x in results if x[1] is None]
storage_cap = pd.DataFrame()
costs = pd.Series(dtype=float)
balance = pd.Series(dtype=float)
for flow in [x for x in flows if "source_el" in x[0].label]:
name = "_".join(flow[0].label.split("_")[2:])
print(name, float(results[flow]["sequences"].sum()))
costs[name] = float(
results[flow]["sequences"].sum() * PARAMETER["el_price"]
)
for flow in [x for x in flows if "shunt_el" in x[1].label]:
name = "_".join(flow[1].label.split("_")[2:])
costs[name] += float(
results[flow]["sequences"].sum() * PARAMETER["sh_price"]
)
storages = [x[0] for x in components if "storage" in x[0].label]
idx = results[storages[0], None]["sequences"]["storage_content"].index
last = idx[-1]
prev = idx[0] - 1 * idx.freq
for s in storages:
name = s.label
storage_cap[name] = results[s, None]["sequences"]["storage_content"]
storage_cap.loc[prev, name] = results[s, None]["scalars"][
"init_content"
]
balance[name] = (
storage_cap.loc[last][name] - storage_cap.loc[prev][name]
)
if plt is not None:
storage_cap.plot(drawstyle="steps-mid", subplots=False, sharey=True)
storage_cap.plot(drawstyle="steps-mid", subplots=True, sharey=True)
costs.plot(kind="bar", ax=plt.subplots()[1], rot=0)
balance.index = [
"balanced",
"balanced_None",
"unbalanced",
"unbalanced_None",
]
balance.plot(
kind="bar",
linewidth=1,
edgecolor="#000000",
rot=0,
ax=plt.subplots()[1],
)
plt.show()
print(storage_cap)
print(costs)
print(balance)
if __name__ == "__main__":
storage_example()
| gpl-3.0 |
ndingwall/scikit-learn | sklearn/cluster/_optics.py | 6 | 37726 | # -*- coding: utf-8 -*-
"""Ordering Points To Identify the Clustering Structure (OPTICS)
These routines execute the OPTICS algorithm, and implement various
cluster extraction methods of the ordered list.
Authors: Shane Grigsby <[email protected]>
Adrin Jalali <[email protected]>
Erich Schubert <[email protected]>
Hanmin Qin <[email protected]>
License: BSD 3 clause
"""
import warnings
import numpy as np
from ..utils import gen_batches, get_chunk_n_rows
from ..utils.validation import _deprecate_positional_args
from ..neighbors import NearestNeighbors
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
class OPTICS(ClusterMixin, BaseEstimator):
"""Estimate clustering structure from vector array.
OPTICS (Ordering Points To Identify the Clustering Structure), closely
    related to DBSCAN, finds core samples of high density and expands clusters
    from them [1]_. Unlike DBSCAN, it keeps the cluster hierarchy for a
    variable neighborhood radius, which makes it better suited for large
    datasets than the current sklearn implementation of DBSCAN.
Clusters are then extracted using a DBSCAN-like method
(cluster_method = 'dbscan') or an automatic
technique proposed in [1]_ (cluster_method = 'xi').
This implementation deviates from the original OPTICS by first performing
k-nearest-neighborhood searches on all points to identify core sizes, then
computing only the distances to unprocessed points when constructing the
cluster order. Note that we do not employ a heap to manage the expansion
candidates, so the time complexity will be O(n^2).
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
min_samples : int > 1 or float between 0 and 1, default=5
The number of samples in a neighborhood for a point to be considered as
a core point. Also, up and down steep regions can't have more than
``min_samples`` consecutive non-steep points. Expressed as an absolute
number or a fraction of the number of samples (rounded to be at least
2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
cluster_method : str, default='xi'
The extraction method used to extract clusters using the calculated
reachability and ordering. Possible values are "xi" and "dbscan".
eps : float, default=None
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. By default it assumes the same value
as ``max_eps``.
Used only when ``cluster_method='dbscan'``.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
Used only when ``cluster_method='xi'``.
predecessor_correction : bool, default=True
Correct clusters according to the predecessors calculated by OPTICS
[2]_. This parameter has minimal effect on most datasets.
Used only when ``cluster_method='xi'``.
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
Used only when ``cluster_method='xi'``.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
labels_ : ndarray of shape (n_samples,)
Cluster labels for each point in the dataset given to fit().
Noisy samples and points which are not included in a leaf cluster
of ``cluster_hierarchy_`` are labeled as -1.
reachability_ : ndarray of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
ordering_ : ndarray of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
predecessor_ : ndarray of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
cluster_hierarchy_ : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to
``(end, -start)`` (ascending) so that larger clusters encompassing
smaller clusters come after those smaller ones. Since ``labels_`` does
not reflect the hierarchy, usually
    ``len(cluster_hierarchy_) > len(np.unique(optics.labels_))``. Please also
note that these indices are of the ``ordering_``, i.e.
``X[ordering_][start:end + 1]`` form a cluster.
Only available when ``cluster_method='xi'``.
See Also
--------
DBSCAN : A similar clustering for a specified neighborhood radius (eps).
Our implementation is optimized for runtime.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
.. [2] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
Examples
--------
>>> from sklearn.cluster import OPTICS
>>> import numpy as np
>>> X = np.array([[1, 2], [2, 5], [3, 6],
... [8, 7], [8, 8], [7, 3]])
>>> clustering = OPTICS(min_samples=2).fit(X)
>>> clustering.labels_
array([0, 0, 0, 1, 1, 1])
"""
@_deprecate_positional_args
def __init__(self, *, min_samples=5, max_eps=np.inf, metric='minkowski',
p=2, metric_params=None, cluster_method='xi', eps=None,
xi=0.05, predecessor_correction=True, min_cluster_size=None,
algorithm='auto', leaf_size=30, n_jobs=None):
self.max_eps = max_eps
self.min_samples = min_samples
self.min_cluster_size = min_cluster_size
self.algorithm = algorithm
self.metric = metric
self.metric_params = metric_params
self.p = p
self.leaf_size = leaf_size
self.cluster_method = cluster_method
self.eps = eps
self.xi = xi
self.predecessor_correction = predecessor_correction
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform OPTICS clustering.
Extracts an ordered list of points and reachability distances, and
performs initial clustering using ``max_eps`` distance specified at
OPTICS object instantiation.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
            (n_samples, n_samples) if metric='precomputed'
A feature array, or array of distances between samples if
metric='precomputed'.
y : ignored
Ignored.
Returns
-------
self : instance of OPTICS
The instance.
"""
X = self._validate_data(X, dtype=float)
if self.cluster_method not in ['dbscan', 'xi']:
raise ValueError("cluster_method should be one of"
" 'dbscan' or 'xi' but is %s" %
self.cluster_method)
(self.ordering_, self.core_distances_, self.reachability_,
self.predecessor_) = compute_optics_graph(
X=X, min_samples=self.min_samples, algorithm=self.algorithm,
leaf_size=self.leaf_size, metric=self.metric,
metric_params=self.metric_params, p=self.p, n_jobs=self.n_jobs,
max_eps=self.max_eps)
# Extract clusters from the calculated orders and reachability
if self.cluster_method == 'xi':
labels_, clusters_ = cluster_optics_xi(
reachability=self.reachability_,
predecessor=self.predecessor_,
ordering=self.ordering_,
min_samples=self.min_samples,
min_cluster_size=self.min_cluster_size,
xi=self.xi,
predecessor_correction=self.predecessor_correction)
self.cluster_hierarchy_ = clusters_
elif self.cluster_method == 'dbscan':
if self.eps is None:
eps = self.max_eps
else:
eps = self.eps
if eps > self.max_eps:
raise ValueError('Specify an epsilon smaller than %s. Got %s.'
% (self.max_eps, eps))
labels_ = cluster_optics_dbscan(
reachability=self.reachability_,
core_distances=self.core_distances_,
ordering=self.ordering_, eps=eps)
self.labels_ = labels_
return self
def _validate_size(size, n_samples, param_name):
if size <= 0 or (size !=
int(size)
and size > 1):
raise ValueError('%s must be a positive integer '
'or a float between 0 and 1. Got %r' %
(param_name, size))
elif size > n_samples:
raise ValueError('%s must be no greater than the'
' number of samples (%d). Got %d' %
(param_name, n_samples, size))
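# Editor's note (illustrative, not part of scikit-learn): the validator above
# accepts either an absolute count or a fraction, e.g.
# ``_validate_size(5, 100, 'min_samples')`` and
# ``_validate_size(0.05, 100, 'min_samples')`` both pass silently, while
# ``_validate_size(0, 100, 'min_samples')`` and
# ``_validate_size(1.5, 100, 'min_samples')`` raise ValueError.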
# OPTICS helper functions
def _compute_core_distances_(X, neighbors, min_samples, working_memory):
"""Compute the k-th nearest neighbor of each sample
Equivalent to neighbors.kneighbors(X, self.min_samples)[0][:, -1]
but with more memory efficiency.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
neighbors : NearestNeighbors instance
The fitted nearest neighbors estimator.
working_memory : int, default=None
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
Returns
-------
core_distances : ndarray of shape (n_samples,)
Distance at which each sample becomes a core point.
Points which will never be core have a distance of inf.
"""
n_samples = X.shape[0]
core_distances = np.empty(n_samples)
core_distances.fill(np.nan)
chunk_n_rows = get_chunk_n_rows(row_bytes=16 * min_samples,
max_n_rows=n_samples,
working_memory=working_memory)
slices = gen_batches(n_samples, chunk_n_rows)
for sl in slices:
core_distances[sl] = neighbors.kneighbors(
X[sl], min_samples)[0][:, -1]
return core_distances
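# Editor's sketch (not part of scikit-learn's API): a quick numerical check
# that the chunked helper above matches the direct ``kneighbors`` call its
# docstring refers to. The function name and toy data are illustrative
# assumptions, not upstream code.
def _demo_core_distances_equivalence():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    min_samples = 5
    nbrs = NearestNeighbors(n_neighbors=min_samples).fit(X)
    chunked = _compute_core_distances_(X=X, neighbors=nbrs,
                                       min_samples=min_samples,
                                       working_memory=None)
    direct = nbrs.kneighbors(X, min_samples)[0][:, -1]
    # Both paths compute the distance to the min_samples-th neighbor.
    return np.allclose(chunked, direct)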
@_deprecate_positional_args
def compute_optics_graph(X, *, min_samples, max_eps, metric, p, metric_params,
algorithm, leaf_size, n_jobs):
"""Computes the OPTICS reachability graph.
Read more in the :ref:`User Guide <optics>`.
Parameters
----------
X : ndarray of shape (n_samples, n_features), or \
        (n_samples, n_samples) if metric='precomputed'.
A feature array, or array of distances between samples if
metric='precomputed'
min_samples : int > 1 or float between 0 and 1
The number of samples in a neighborhood for a point to be considered
as a core point. Expressed as an absolute number or a fraction of the
number of samples (rounded to be at least 2).
max_eps : float, default=np.inf
The maximum distance between two samples for one to be considered as
in the neighborhood of the other. Default value of ``np.inf`` will
identify clusters across all scales; reducing ``max_eps`` will result
in shorter run times.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string. If metric is
"precomputed", X is assumed to be a distance matrix and must be square.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
:class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method. (default)
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
ordering_ : array of shape (n_samples,)
The cluster ordered list of sample indices.
core_distances_ : array of shape (n_samples,)
Distance at which each sample becomes a core point, indexed by object
order. Points which will never be core have a distance of inf. Use
``clust.core_distances_[clust.ordering_]`` to access in cluster order.
reachability_ : array of shape (n_samples,)
Reachability distances per sample, indexed by object order. Use
``clust.reachability_[clust.ordering_]`` to access in cluster order.
predecessor_ : array of shape (n_samples,)
Point that a sample was reached from, indexed by object order.
Seed points have a predecessor of -1.
References
----------
.. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
and Jörg Sander. "OPTICS: ordering points to identify the clustering
structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.
"""
n_samples = X.shape[0]
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
# Start all points as 'unprocessed' ##
reachability_ = np.empty(n_samples)
reachability_.fill(np.inf)
predecessor_ = np.empty(n_samples, dtype=int)
predecessor_.fill(-1)
nbrs = NearestNeighbors(n_neighbors=min_samples,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
metric_params=metric_params,
p=p,
n_jobs=n_jobs)
nbrs.fit(X)
# Here we first do a kNN query for each point, this differs from
# the original OPTICS that only used epsilon range queries.
# TODO: handle working_memory somehow?
core_distances_ = _compute_core_distances_(X=X, neighbors=nbrs,
min_samples=min_samples,
working_memory=None)
# OPTICS puts an upper limit on these, use inf for undefined.
core_distances_[core_distances_ > max_eps] = np.inf
# Main OPTICS loop. Not parallelizable. The order that entries are
# written to the 'ordering_' list is important!
# Note that this implementation is O(n^2) theoretically, but
# supposedly with very low constant factors.
processed = np.zeros(X.shape[0], dtype=bool)
ordering = np.zeros(X.shape[0], dtype=int)
for ordering_idx in range(X.shape[0]):
# Choose next based on smallest reachability distance
# (And prefer smaller ids on ties, possibly np.inf!)
index = np.where(processed == 0)[0]
point = index[np.argmin(reachability_[index])]
processed[point] = True
ordering[ordering_idx] = point
if core_distances_[point] != np.inf:
_set_reach_dist(core_distances_=core_distances_,
reachability_=reachability_,
predecessor_=predecessor_,
point_index=point,
processed=processed, X=X, nbrs=nbrs,
metric=metric, metric_params=metric_params,
p=p, max_eps=max_eps)
if np.all(np.isinf(reachability_)):
warnings.warn("All reachability values are inf. Set a larger"
" max_eps or all data will be considered outliers.",
UserWarning)
return ordering, core_distances_, reachability_, predecessor_
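# Editor's sketch (not part of scikit-learn's API): calling the graph builder
# directly. All keyword arguments are required by the signature above; the
# values simply mirror the OPTICS estimator defaults, and the toy data set is
# an illustrative assumption.
def _demo_compute_optics_graph():
    X = np.array([[1., 2.], [2., 5.], [3., 6.],
                  [8., 7.], [8., 8.], [7., 3.]])
    ordering, core_d, reach, pred = compute_optics_graph(
        X, min_samples=2, max_eps=np.inf, metric='minkowski', p=2,
        metric_params=None, algorithm='auto', leaf_size=30, n_jobs=None)
    # reach[ordering] is the reachability plot; its first entry is inf
    # because the starting point has no predecessor.
    return reach[ordering]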
def _set_reach_dist(core_distances_, reachability_, predecessor_,
point_index, processed, X, nbrs, metric, metric_params,
p, max_eps):
P = X[point_index:point_index + 1]
    # Assume that radius_neighbors is faster without returning distances,
    # and we don't need all of them anyway; the trade-off is that some
    # distances may be computed a second time below.
indices = nbrs.radius_neighbors(P, radius=max_eps,
return_distance=False)[0]
# Getting indices of neighbors that have not been processed
unproc = np.compress(~np.take(processed, indices), indices)
# Neighbors of current point are already processed.
if not unproc.size:
return
# Only compute distances to unprocessed neighbors:
if metric == 'precomputed':
dists = X[point_index, unproc]
else:
_params = dict() if metric_params is None else metric_params.copy()
if metric == 'minkowski' and 'p' not in _params:
# the same logic as neighbors, p is ignored if explicitly set
# in the dict params
_params['p'] = p
dists = pairwise_distances(P, np.take(X, unproc, axis=0),
metric=metric, n_jobs=None,
**_params).ravel()
rdists = np.maximum(dists, core_distances_[point_index])
improved = np.where(rdists < np.take(reachability_, unproc))
reachability_[unproc[improved]] = rdists[improved]
predecessor_[unproc[improved]] = point_index
@_deprecate_positional_args
def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
"""Performs DBSCAN extraction for an arbitrary epsilon.
Extracting the clusters runs in linear time. Note that this results in
``labels_`` which are close to a :class:`~sklearn.cluster.DBSCAN` with
similar settings and ``eps``, only if ``eps`` is close to ``max_eps``.
Parameters
----------
reachability : array of shape (n_samples,)
Reachability distances calculated by OPTICS (``reachability_``)
core_distances : array of shape (n_samples,)
Distances at which points become core (``core_distances_``)
ordering : array of shape (n_samples,)
OPTICS ordered point indices (``ordering_``)
eps : float
DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
to one another.
Returns
-------
labels_ : array of shape (n_samples,)
The estimated labels.
"""
n_samples = len(core_distances)
labels = np.zeros(n_samples, dtype=int)
far_reach = reachability > eps
near_core = core_distances <= eps
labels[ordering] = np.cumsum(far_reach[ordering] & near_core[ordering]) - 1
labels[far_reach & ~near_core] = -1
return labels
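# Editor's sketch (not part of scikit-learn's API): a single OPTICS fit can be
# re-cut at several eps values without recomputing the ordering. The helper
# name and the eps values below are illustrative assumptions.
def _demo_eps_sweep():
    X = np.array([[1., 2.], [2., 5.], [3., 6.],
                  [8., 7.], [8., 8.], [7., 3.]])
    clust = OPTICS(min_samples=2, cluster_method='dbscan', eps=2.0).fit(X)
    labels_per_eps = {}
    for eps in (0.5, 2.0, 5.0):
        labels_per_eps[eps] = cluster_optics_dbscan(
            reachability=clust.reachability_,
            core_distances=clust.core_distances_,
            ordering=clust.ordering_, eps=eps)
    return labels_per_eps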
def cluster_optics_xi(*, reachability, predecessor, ordering, min_samples,
min_cluster_size=None, xi=0.05,
predecessor_correction=True):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`)
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`)
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
-------
labels : ndarray of shape (n_samples,)
The labels assigned to samples. Points which are not included
in any cluster are labeled as -1.
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of ``[start, end]`` in each row, with
all indices inclusive. The clusters are ordered according to ``(end,
-start)`` (ascending) so that larger clusters encompassing smaller
clusters come after such nested smaller clusters. Since ``labels`` does
        not reflect the hierarchy, usually ``len(clusters) >
        len(np.unique(labels))``.
"""
n_samples = len(reachability)
_validate_size(min_samples, n_samples, 'min_samples')
if min_samples <= 1:
min_samples = max(2, int(min_samples * n_samples))
if min_cluster_size is None:
min_cluster_size = min_samples
_validate_size(min_cluster_size, n_samples, 'min_cluster_size')
if min_cluster_size <= 1:
min_cluster_size = max(2, int(min_cluster_size * n_samples))
clusters = _xi_cluster(reachability[ordering], predecessor[ordering],
ordering, xi,
min_samples, min_cluster_size,
predecessor_correction)
labels = _extract_xi_labels(ordering, clusters)
return labels, clusters
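# Editor's sketch (not part of scikit-learn's API): the xi extraction can also
# be driven from a precomputed graph, yielding both flat labels and the
# [start, end] cluster hierarchy. The demo data is an assumption.
def _demo_xi_extraction():
    X = np.array([[1., 2.], [2., 5.], [3., 6.],
                  [8., 7.], [8., 8.], [7., 3.]])
    ordering, core_d, reach, pred = compute_optics_graph(
        X, min_samples=2, max_eps=np.inf, metric='minkowski', p=2,
        metric_params=None, algorithm='auto', leaf_size=30, n_jobs=None)
    labels, hierarchy = cluster_optics_xi(
        reachability=reach, predecessor=pred, ordering=ordering,
        min_samples=2)
    return labels, hierarchy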
def _extend_region(steep_point, xward_point, start, min_samples):
"""Extend the area until it's maximal.
    It's the same function for both upward and downward regions, depending on
the given input parameters. Assuming:
- steep_{upward/downward}: bool array indicating whether a point is a
steep {upward/downward};
- upward/downward: bool array indicating whether a point is
upward/downward;
    To extend an upward region, ``steep_point=steep_upward`` and
``xward_point=downward`` are expected, and to extend a downward region,
``steep_point=steep_downward`` and ``xward_point=upward``.
Parameters
----------
steep_point : ndarray of shape (n_samples,), dtype=bool
True if the point is steep downward (upward).
xward_point : ndarray of shape (n_samples,), dtype=bool
True if the point is an upward (respectively downward) point.
start : int
The start of the xward region.
min_samples : int
The same as the min_samples given to OPTICS. Up and down steep
        regions can't have more than ``min_samples`` consecutive non-steep
points.
    Returns
    -------
    end : int
        The end of the region, which can be behind the current index. The
        region includes the ``end`` index.
"""
n_samples = len(steep_point)
non_xward_points = 0
index = start
end = start
# find a maximal area
while index < n_samples:
if steep_point[index]:
non_xward_points = 0
end = index
elif not xward_point[index]:
# it's not a steep point, but still goes up.
non_xward_points += 1
# region should include no more than min_samples consecutive
# non steep xward points.
if non_xward_points > min_samples:
break
else:
return end
index += 1
return end
def _update_filter_sdas(sdas, mib, xi_complement, reachability_plot):
"""Update steep down areas (SDAs) using the new maximum in between (mib)
value, and the given complement of xi, i.e. ``1 - xi``.
"""
if np.isinf(mib):
return []
res = [sda for sda in sdas
if mib <= reachability_plot[sda['start']] * xi_complement]
for sda in res:
sda['mib'] = max(sda['mib'], mib)
return res
def _correct_predecessor(reachability_plot, predecessor_plot, ordering, s, e):
"""Correct for predecessors.
Applies Algorithm 2 of [1]_.
    Input parameters are ordered by the computed OPTICS ordering.
.. [1] Schubert, Erich, Michael Gertz.
"Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.
"""
while s < e:
if reachability_plot[s] > reachability_plot[e]:
return s, e
p_e = ordering[predecessor_plot[e]]
for i in range(s, e):
if p_e == ordering[i]:
return s, e
e -= 1
return None, None
def _xi_cluster(reachability_plot, predecessor_plot, ordering, xi, min_samples,
min_cluster_size, predecessor_correction):
"""Automatically extract clusters according to the Xi-steep method.
    This is roughly an implementation of Figure 19 of the OPTICS paper.
Parameters
----------
reachability_plot : array-like of shape (n_samples,)
The reachability plot, i.e. reachability ordered according to
the calculated ordering, all computed by OPTICS.
predecessor_plot : array-like of shape (n_samples,)
Predecessors ordered according to the calculated ordering.
xi : float, between 0 and 1
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
min_samples : int > 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
min_cluster_size : int > 1
Minimum number of samples in an OPTICS cluster.
predecessor_correction : bool
Correct clusters based on the calculated predecessors.
Returns
-------
clusters : ndarray of shape (n_clusters, 2)
The list of clusters in the form of [start, end] in each row, with all
indices inclusive. The clusters are ordered in a way that larger
clusters encompassing smaller clusters come after those smaller
clusters.
"""
# Our implementation adds an inf to the end of reachability plot
# this helps to find potential clusters at the end of the
# reachability plot even if there's no upward region at the end of it.
reachability_plot = np.hstack((reachability_plot, np.inf))
xi_complement = 1 - xi
sdas = [] # steep down areas, introduced in section 4.3.2 of the paper
clusters = []
index = 0
mib = 0. # maximum in between, section 4.3.2
# Our implementation corrects a mistake in the original
# paper, i.e., in Definition 9 steep downward point,
    # r(p) * (1 - xi) <= r(p + 1) should be
    # r(p) * (1 - xi) >= r(p + 1)
with np.errstate(invalid='ignore'):
ratio = reachability_plot[:-1] / reachability_plot[1:]
steep_upward = ratio <= xi_complement
steep_downward = ratio >= 1 / xi_complement
downward = ratio > 1
upward = ratio < 1
    # the following loop is almost exactly as in Figure 19 of the paper.
# it jumps over the areas which are not either steep down or up areas
for steep_index in iter(np.flatnonzero(steep_upward | steep_downward)):
# just continue if steep_index has been a part of a discovered xward
# area.
if steep_index < index:
continue
mib = max(mib, np.max(reachability_plot[index:steep_index + 1]))
# steep downward areas
if steep_downward[steep_index]:
sdas = _update_filter_sdas(sdas, mib, xi_complement,
reachability_plot)
D_start = steep_index
D_end = _extend_region(steep_downward, upward,
D_start, min_samples)
D = {'start': D_start, 'end': D_end, 'mib': 0.}
sdas.append(D)
index = D_end + 1
mib = reachability_plot[index]
# steep upward areas
else:
sdas = _update_filter_sdas(sdas, mib, xi_complement,
reachability_plot)
U_start = steep_index
U_end = _extend_region(steep_upward, downward, U_start,
min_samples)
index = U_end + 1
mib = reachability_plot[index]
U_clusters = []
for D in sdas:
c_start = D['start']
c_end = U_end
# line (**), sc2*
if reachability_plot[c_end + 1] * xi_complement < D['mib']:
continue
# Definition 11: criterion 4
D_max = reachability_plot[D['start']]
if D_max * xi_complement >= reachability_plot[c_end + 1]:
# Find the first index from the left side which is almost
# at the same level as the end of the detected cluster.
while (reachability_plot[c_start + 1] >
reachability_plot[c_end + 1]
and c_start < D['end']):
c_start += 1
elif reachability_plot[c_end + 1] * xi_complement >= D_max:
# Find the first index from the right side which is almost
# at the same level as the beginning of the detected
# cluster.
# Our implementation corrects a mistake in the original
# paper, i.e., in Definition 11 4c, r(x) < r(sD) should be
# r(x) > r(sD).
while (reachability_plot[c_end - 1] > D_max
and c_end > U_start):
c_end -= 1
# predecessor correction
if predecessor_correction:
c_start, c_end = _correct_predecessor(reachability_plot,
predecessor_plot,
ordering,
c_start,
c_end)
if c_start is None:
continue
# Definition 11: criterion 3.a
if c_end - c_start + 1 < min_cluster_size:
continue
# Definition 11: criterion 1
if c_start > D['end']:
continue
# Definition 11: criterion 2
if c_end < U_start:
continue
U_clusters.append((c_start, c_end))
# add smaller clusters first.
U_clusters.reverse()
clusters.extend(U_clusters)
return np.array(clusters)
def _extract_xi_labels(ordering, clusters):
"""Extracts the labels from the clusters returned by `_xi_cluster`.
We rely on the fact that clusters are stored
with the smaller clusters coming before the larger ones.
Parameters
----------
ordering : array-like of shape (n_samples,)
The ordering of points calculated by OPTICS
clusters : array-like of shape (n_clusters, 2)
List of clusters i.e. (start, end) tuples,
as returned by `_xi_cluster`.
Returns
-------
labels : ndarray of shape (n_samples,)
"""
labels = np.full(len(ordering), -1, dtype=int)
label = 0
for c in clusters:
if not np.any(labels[c[0]:(c[1] + 1)] != -1):
labels[c[0]:(c[1] + 1)] = label
label += 1
labels[ordering] = labels.copy()
return labels
| bsd-3-clause |
kalvdans/scipy | scipy/stats/_distn_infrastructure.py | 3 | 119483 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY3
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state, _lazywhere, _lazyselect
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, shape, ndarray,
product, reshape, zeros, floor, logical_and, log, sqrt, exp)
from numpy import (place, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY3:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
else:
instancemethod = types.MethodType
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(k, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(k, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
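# Editor's sketch (not part of scipy): a numerical check of the identity the
# n == 3 branch above relies on, E[X**3] = mu3 + 3*mu*mu2 + mu**3, using the
# sample-moment helper `_moment`. The gamma sample is an arbitrary choice.
def _demo_third_moment_identity():
    rng = np.random.RandomState(0)
    data = rng.gamma(2.0, size=1000)
    mu = data.mean()
    mu2 = _moment(data, 2, mu)
    mu3 = _moment(data, 3, mu)
    raw3 = (data ** 3).mean()
    # The identity is algebraic, so it holds exactly for sample moments too.
    return np.allclose(raw3, mu3 + 3 * mu * mu2 + mu ** 3)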
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
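# Editor's sketch (not part of scipy): sanity check of the two helpers above
# on a distribution with known shape statistics; an exponential sample has
# skewness 2 and excess kurtosis 6, so a large sample should land nearby.
def _demo_sample_shape_stats():
    rng = np.random.RandomState(0)
    data = rng.exponential(size=100000)
    return _skew(data), _kurtosis(data)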
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
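# Editor's sketch (not part of scipy): a quick consistency check between the
# noncentral chi-square pdf and cdf helpers above; integrating the pdf from
# 0 to x should agree with chndtr. The particular x, df, nc are arbitrary.
def _demo_ncx2_consistency():
    x, df, nc = 4.0, 3.0, 2.0
    num, _err = integrate.quad(lambda t: _ncx2_pdf(t, df, nc), 0, x)
    return np.allclose(num, _ncx2_cdf(x, df, nc))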
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
    # Noncentral moments (moments about the origin)
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
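    # Editor's note (illustrative, not part of scipy): the size/broadcast rule
    # implemented above means, for example, that
    # ``stats.norm.rvs(loc=[0, 10, 20], scale=1, size=(4, 3))`` is accepted
    # and returns an array of shape (4, 3), while ``size=2`` with those three
    # broadcast locations raises a ValueError from the check just above.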
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _support_mask(self, x):
return (self.a <= x) & (x <= self.b)
def _open_support_mask(self, x):
return (self.a < x) & (x < self.b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
# `size` should just be an argument to _rvs(), but for historical
# reasons it is made an attribute that is read by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
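# Hypothetical usage sketch (assumes ``scipy.stats.norm``): ``random_state``
# passed as a keyword overrides ``self.random_state`` for this call only,
# which is the "extra gymnastics" handled above.
#
#   >>> from scipy.stats import norm
#   >>> sample = norm.rvs(loc=10, scale=2, size=4, random_state=42)
#   >>> sample.shape
#   (4,)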
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
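# Illustrative sketch (assumes ``scipy.stats.norm``): requesting all four
# moments of a normal with loc=3, scale=2 yields mean 3, variance 4, and
# zero skew and excess kurtosis.
#
#   >>> from scipy.stats import norm
#   >>> m, v, s, k = norm.stats(loc=3, scale=2, moments='mvsk')
#   >>> float(m), float(v), float(s), float(k)
#   (3.0, 4.0, 0.0, 0.0)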
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
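# Illustrative check (assumes ``scipy.stats.norm``): the ``+ log(scale)``
# term above is the usual change-of-variable shift, e.g. for a normal with
# scale=2 the differential entropy is 0.5*log(2*pi*e*scale**2):
#
#   >>> import numpy as np
#   >>> from scipy.stats import norm
#   >>> np.allclose(norm.entropy(scale=2), 0.5 * np.log(2 * np.pi * np.e * 4))
#   True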
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
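# Worked check (illustrative, not from the source): the binomial expansion
# above gives, for a normal with loc=1 and scale=2, the second non-central
# moment E[(1 + 2*Z)**2] = 1 + 4*E[Z**2] = 5.
#
#   >>> from scipy.stats import norm
#   >>> norm.moment(2, loc=1, scale=2)
#   5.0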
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
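# Illustrative sketch (assumes ``scipy.stats.norm``): for the standard
# normal, ``interval(0.95)`` returns the familiar +/- 1.96 bounds since
# 2.5% of the mass lies in each tail.
#
#   >>> from scipy.stats import norm
#   >>> lo, hi = norm.interval(0.95)
#   >>> round(lo, 2), round(hi, 2)
#   (-1.96, 1.96)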
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result array that indicates a value for which some
argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow, defaults exist for the remaining
methods; for speed and/or accuracy you can override::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location
and scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
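# Illustrative check (assumes ``scipy.stats.norm``): the division by
# ``scale`` above is the Jacobian of ``y = (x - loc)/scale``; at x=0 the
# standard normal density is 1/sqrt(2*pi).
#
#   >>> import numpy as np
#   >>> from scipy.stats import norm
#   >>> np.allclose(norm.pdf(0.), 1 / np.sqrt(2 * np.pi))
#   True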
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
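# Illustrative sketch (assumes ``scipy.stats.norm``): q=0 and q=1 map to the
# (scaled, shifted) support end-points handled by cond2/cond3 above, while
# interior probabilities go through ``_ppf``.
#
#   >>> from scipy.stats import norm
#   >>> norm.ppf([0., 0.5, 1.])              # -> [-inf, 0., inf]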
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x)):
return inf
return self._nnlf(x, *args) + n_log_scale
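# Illustrative check (assumes ``scipy.stats.norm``): with theta=(loc, scale)
# and scale=1, the ``n_log_scale`` term vanishes, so nnlf reduces to the
# plain negative log-likelihood.
#
#   >>> import numpy as np
#   >>> from scipy.stats import norm
#   >>> x = np.array([0., 1., 2.])
#   >>> np.allclose(norm.nnlf((0., 1.), x), -norm.logpdf(x).sum())
#   True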
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x)
n_bad = sum(cond0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape (if applicable), location, and scale
parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
mle_tuple : tuple of floats
MLEs for any shape parameters (if applicable), followed by those
for location and scale. For most random variables, shape statistics
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE; it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[f(x)] = Integral(f(x) * dist.pdf(x)) from lbound to ubound
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
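# Illustrative check (assumes ``scipy.stats.norm``): the second moment of the
# standard normal is 1, so integrating x**2 against its pdf returns ~1.0.
#
#   >>> from scipy.stats import norm
#   >>> round(norm.expect(lambda x: x**2), 6)
#   1.0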
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / np.sum(qk, axis=0)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
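# Illustrative usage of the helper above (not part of the original source):
# a fair coin has entropy log(2) in nats, i.e. exactly 1 bit.
#
#   >>> round(entropy([0.5, 0.5]), 6)        # natural log base
#   0.693147
#   >>> entropy([0.5, 0.5], base=2)          # in bits
#   1.0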
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the corresponding
non-zero probabilities with ``sum(pk) == 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in a result array that indicates a value for which some
argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere "
"any longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 1
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
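# Illustrative check (assumes ``scipy.stats.poisson``): at k=0 the Poisson
# pmf is exp(-mu), and the clipping above keeps the result inside [0, 1].
#
#   >>> import numpy as np
#   >>> from scipy.stats import poisson
#   >>> np.allclose(poisson.pmf(0, mu=2.0), np.exp(-2.0))
#   True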
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
return _expect(lambda x: entr(self.pmf(x, *args)),
self.a, self.b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
        Calculate the expected value of a function with respect to the
        distribution, for a discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
            Iterate over the support of the distribution in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not exist,
depending on the function, `func`. If it does exist, but the sum converges
        slowly, the accuracy of the result may be rather low. For instance, for
        ``zipf(4)`` the accuracy of the mean and variance is only about 1e-5.
        Increasing `maxcount` and/or `chunksize` may improve the result, but may
        also make zipf very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = self.a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = self.b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
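# Worked example (added for illustration): for a pmf taking the values
# {0: 0.2, 1: 0.5, 2: 0.3} and the default func(k) = k, the chunked summation
# carried out by _expect() below evaluates 0*0.2 + 1*0.5 + 2*0.3 = 1.1, i.e.
# the mean of the distribution; expect() then divides the result by invfac
# (which is 1.0 unless ``conditional=True``).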
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
    The constructor ignores most of the arguments; only the `values` argument
    is required.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if len(xk) != len(pk):
raise ValueError("xk and pk need to have the same length.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere any"
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 0
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
@np.deprecate(message="moment_gen method is not used anywhere any more "
"and is deprecated in scipy 0.18.")
def moment_gen(self, t):
t = asarray(t)
return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
@property
@np.deprecate(message="F attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def F(self):
return dict(zip(self.xk, self.qvals))
@property
@np.deprecate(message="Finv attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def Finv(self):
decreasing_keys = sorted(self.F.keys(), reverse=True)
return dict((self.F[k], k) for k in decreasing_keys)
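    # Illustrative note (added): for values=([1, 2, 4], [0.25, 0.5, 0.25]) the
    # constructor above sorts xk, keeps pk aligned with it, and builds
    # qvals = [0.25, 0.75, 1.0]; _cdf() and _ppf() then reduce to table
    # lookups into xk / qvals.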
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
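# --- Illustrative usage sketch (added; not part of the original module) -----
# A minimal example of how the rv_sample machinery above is exercised through
# the public ``rv_discrete(values=...)`` constructor.  It is meant to be run
# from a separate script; the guard below only keeps it inert on import.
if __name__ == '__main__':
    from scipy import stats
    xk = [0, 1, 2]
    pk = [0.2, 0.5, 0.3]
    custom = stats.rv_discrete(name='custom', values=(xk, pk))
    print(custom.pmf(1))        # 0.5
    print(custom.cdf(1))        # 0.2 + 0.5 = 0.7
    print(custom.expect())      # mean: 0*0.2 + 1*0.5 + 2*0.3 = 1.1
    print(custom.ppf(0.8))      # smallest k with cdf(k) >= 0.8 -> 2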
| bsd-3-clause |
gem/oq-hazardlib | openquake/hmtk/plotting/seismicity/completeness/plot_stepp_1972.py | 1 | 5217 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
#!/usr/bin/env python
'''
Module :mod: 'openquake.hmtk.plotting.seismicity.completeness.plot_stepp_1972'
creates a plot to illustrate the outcome of the Stepp (1972) method for
completeness analysis
'''
import os.path
import numpy as np
import matplotlib.pyplot as plt
valid_markers = ['*', '+', '1', '2', '3', '4', '8', '<', '>', 'D', 'H', '^',
'_', 'd', 'h', 'o', 'p', 's', 'v', 'x', '|']
DEFAULT_SIZE = (8., 6.)
DEFAULT_OFFSET = (1.3, 1.0)
def create_stepp_plot(model, filename, filetype='png', filedpi=300):
'''
Creates the classic Stepp (1972) plots for a completed Stepp analysis,
and exports the figure to a file.
:param model:
Completed Stepp (1972) analysis as instance of :class:
'openquake.hmtk.seismicity.completeness.comp_stepp_1971.Stepp1971'
:param string filename:
Name of output file
:param string filetype:
Type of file (from list supported by matplotlib)
:param int filedpi:
Resolution (dots per inch) of output file
'''
plt.figure(figsize=DEFAULT_SIZE)
if os.path.exists(filename):
raise IOError('File already exists!')
legend_list = [(str(model.magnitude_bin[iloc] + 0.01) + ' - ' +
str(model.magnitude_bin[iloc + 1])) for iloc in range(0,
len(model.magnitude_bin) - 1)]
rgb_list = []
marker_vals = []
    # Get marker from valid list (cycle the list if there are more magnitude
    # bins than distinct markers)
    while len(valid_markers) < len(model.magnitude_bin):
        valid_markers.extend(valid_markers)
marker_sampler = np.arange(0, len(valid_markers), 1)
np.random.shuffle(marker_sampler)
# Get colour for each bin
for value in range(0, len(model.magnitude_bin) - 1):
rgb_samp = np.random.uniform(0., 1., 3)
rgb_list.append((rgb_samp[0], rgb_samp[1], rgb_samp[2]))
marker_vals.append(valid_markers[marker_sampler[value]])
# Plot observed Sigma lambda
for iloc in range(0, len(model.magnitude_bin) - 1):
plt.loglog(model.time_values,
model.sigma[:, iloc],
linestyle='None',
marker=marker_vals[iloc],
color=rgb_list[iloc])
lgd = plt.legend(legend_list, bbox_to_anchor=DEFAULT_OFFSET)
plt.grid(True)
# Plot expected Poisson rate
for iloc in range(0, len(model.magnitude_bin) - 1):
plt.loglog(model.time_values,
model.model_line[:, iloc],
linestyle='-',
marker='None',
color=rgb_list[iloc])
plt.xlim(model.time_values[0] / 2., 2. * model.time_values[-1])
xmarker = model.end_year - model.completeness_table[iloc, 0]
id0 = model.model_line[:, iloc] > 0.
ymarker = 10.0 ** np.interp(np.log10(xmarker),
np.log10(model.time_values[id0]),
np.log10(model.model_line[id0, iloc]))
plt.loglog(xmarker, ymarker, 'ks')
plt.xlabel('Time (years)', fontsize=15)
plt.ylabel("$\\sigma_{\\lambda} = \\sqrt{\\lambda} / \\sqrt{T}$",
fontsize=15)
# Save figure to file
plt.tight_layout()
plt.savefig(filename, dpi=filedpi, format=filetype,
bbox_extra_artists=(lgd,), bbox_inches="tight")
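# --- Illustrative sketch (added; not part of the original module) -----------
# The plot above is built around Stepp's (1972) statistic
# sigma_lambda = sqrt(lambda) / sqrt(T): for a catalogue that is complete over
# a window of length T, the standard deviation of the observed annual rate
# decays as 1 / sqrt(T).  The guarded block below checks that relation on a
# small synthetic Poisson catalogue; it is hypothetical and does not touch the
# Stepp1971 class or its attributes.
if __name__ == '__main__':
    rate = 5.0                                   # true events per year
    durations = np.array([5., 10., 20., 40.])    # observation windows (years)
    counts = np.random.poisson(rate * durations)
    lam = counts / durations                     # observed annual rates
    sigma = np.sqrt(lam) / np.sqrt(durations)    # Stepp's sigma_lambda
    print(np.column_stack([durations, lam, sigma]))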
| agpl-3.0 |
lin-credible/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
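# --- Illustrative usage sketch (added; not part of the original module) -----
# A hypothetical driver for get_doc_object(): hand it any object carrying a
# numpydoc-style docstring and str() the result to obtain Sphinx-ready reST.
# The sample function below is invented purely for illustration.
if __name__ == '__main__':
    def _sample(x):
        """Square a number.

        Parameters
        ----------
        x : float
            Value to square.

        Returns
        -------
        y : float
            The square of ``x``.
        """
        return x * x
    print(str(get_doc_object(_sample)))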
| bsd-3-clause |
ehocchen/trading-with-python | lib/classes.py | 76 | 7847 | """
worker classes
@author: Jev Kuznetsov
Licence: GPL v2
"""
__docformat__ = 'restructuredtext'
import os
import logger as logger
import yahooFinance as yahoo
from functions import returns, rank
from datetime import date
from pandas import DataFrame, Series
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Symbol(object):
'''
    Symbol class, the foundation of the Trading With Python library.
    This class acts as an interface to Yahoo data, Interactive Brokers, etc.
'''
def __init__(self,name):
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('class created.')
self.dataDir = os.getenv("USERPROFILE")+'\\twpData\\symbols\\'+self.name
self.log.debug('Data dir:'+self.dataDir)
self.ohlc = None # historic OHLC data
def downloadHistData(self, startDate=(2010,1,1),endDate=date.today().timetuple()[:3],\
source = 'yahoo'):
'''
get historical OHLC data from a data source (yahoo is default)
        startDate and endDate are tuples in form (y,m,d)
'''
self.log.debug('Getting OHLC data')
self.ohlc = yahoo.getHistoricData(self.name,startDate,endDate)
def histData(self,column='adj_close'):
'''
Return a column of historic data.
Returns
-------------
df : DataFrame
'''
s = self.ohlc[column]
return DataFrame(s.values,s.index,[self.name])
@property
def dayReturns(self):
''' close-close returns '''
return (self.ohlc['adj_close']/self.ohlc['adj_close'].shift(1)-1)
#return DataFrame(s.values,s.index,[self.name])
class Portfolio(object):
def __init__(self,histPrice,name=''):
"""
Constructor
Parameters
----------
histPrice : historic price
"""
self.histPrice = histPrice
self.params = DataFrame(index=self.symbols)
self.params['capital'] = 100*np.ones(self.histPrice.shape[1],dtype=np.float)
self.params['last'] = self.histPrice.tail(1).T.ix[:,0]
self.params['shares'] = self.params['capital']/self.params['last']
self.name= name
def setHistPrice(self,histPrice):
self.histPrice = histPrice
def setShares(self,shares):
""" set number of shares, adjust capital
shares: list, np array or Series
"""
if len(shares) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['shares'] = shares
self.params['capital'] = self.params['shares']*self.params['last']
def setCapital(self,capital):
""" Set target captial, adjust number of shares """
if len(capital) != self.histPrice.shape[1]:
raise AttributeError('Wrong size of shares vector.')
self.params['capital'] = capital
self.params['shares'] = self.params['capital']/self.params['last']
def calculateStatistics(self,other=None):
''' calculate spread statistics, save internally '''
res = {}
res['micro'] = rank(self.returns[-1],self.returns)
res['macro'] = rank(self.value[-1], self.value)
res['last'] = self.value[-1]
if other is not None:
res['corr'] = self.returns.corr(returns(other))
return Series(res,name=self.name)
@property
def symbols(self):
return self.histPrice.columns.tolist()
@property
def returns(self):
return (returns(self.histPrice)*self.params['capital']).sum(axis=1)
@property
def value(self):
return (self.histPrice*self.params['shares']).sum(axis=1)
def __repr__(self):
return ("Portfolio %s \n" % self.name ) + str(self.params)
#return ('Spread %s :' % self.name ) + str.join(',',
# ['%s*%.2f' % t for t in zip(self.symbols,self.capital)])
class Spread(object):
'''
Spread class, used to build a spread out of two symbols.
'''
def __init__(self,stock,hedge,beta=None):
''' init with symbols or price series '''
if isinstance(stock,str) and isinstance(hedge,str):
self.symbols = [stock,hedge]
self._getYahooData()
elif isinstance(stock,pd.Series) and isinstance(hedge,pd.Series):
self.symbols = [stock.name,hedge.name]
self.price = pd.DataFrame(dict(zip(self.symbols,[stock,hedge]))).dropna()
else:
raise ValueError('Both stock and hedge should be of the same type, symbol string or Series')
# calculate returns
self.returns = self.price.pct_change()
if beta is not None:
self.beta = beta
else:
self.estimateBeta()
# set data
self.data = pd.DataFrame(index = self.symbols)
self.data['beta'] = pd.Series({self.symbols[0]:1., self.symbols[1]:-self.beta})
def calculateShares(self,bet):
''' set number of shares based on last quote '''
if 'price' not in self.data.columns:
print 'Getting quote...'
self.getQuote()
self.data['shares'] = bet*self.data['beta']/self.data['price']
def estimateBeta(self,plotOn=False):
""" linear estimation of beta """
x = self.returns[self.symbols[1]] # hedge
y = self.returns[self.symbols[0]] # stock
#avoid extremes
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
if plotOn:
plt.plot(x,y,'o')
plt.grid(True)
iteration = 1
nrOutliers = 1
while iteration < 3 and nrOutliers > 0 :
(a,b) = np.polyfit(x,y,1)
yf = np.polyval([a,b],x)
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
if plotOn:
yf = x*beta
plt.plot(x,yf,'-',color='red')
plt.xlabel(self.symbols[1])
plt.ylabel(self.symbols[0])
self.beta = beta
return beta
@property
def spread(self):
''' return daily returns of the pair '''
return (self.returns*self.data['beta']).sum(1)
def getQuote(self):
''' get current quote from yahoo '''
q = yahoo.getQuote(self.symbols)
self.data['price'] = q['last']
def _getYahooData(self, startDate=(2007,1,1)):
""" fetch historic data """
data = {}
for symbol in self.symbols:
print 'Downloading %s' % symbol
data[symbol]=(yahoo.getHistoricData(symbol,sDate=startDate)['adj_close'] )
self.price = pd.DataFrame(data).dropna()
def __repr__(self):
return 'Spread 1*%s & %.2f*%s ' % (self.symbols[0],-self.beta,self.symbols[1])
@property
def name(self):
return str.join('_',self.symbols)
if __name__=='__main__':
    s = Spread('SPY', 'IWM')
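    # Hypothetical continuation of the demo above (added for illustration).
    # The Spread constructor downloads data from Yahoo Finance, so network
    # access is assumed; the tickers are arbitrary examples.
    print s
    print 'estimated beta: %.2f' % s.beta
    print s.spread.tail()
    # A Portfolio demo on synthetic prices, so no download is needed:
    idx = pd.date_range('2014-01-01', periods=5)
    prices = pd.DataFrame({'AAA': [10.0, 10.5, 10.2, 10.8, 11.0],
                           'BBB': [20.0, 19.5, 19.8, 20.2, 20.5]}, index=idx)
    p = Portfolio(prices, name='demo')
    print p
    print p.value.tail()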
| bsd-3-clause |
ianctse/pvlib-python | pvlib/test/test_tracking.py | 1 | 11782 | import logging
pvl_logger = logging.getLogger('pvlib')
import datetime
import numpy as np
from numpy import nan
import pandas as pd
from nose.tools import raises, assert_almost_equals
from nose.plugins.skip import SkipTest
from pandas.util.testing import assert_frame_equal
from pvlib.location import Location
from pvlib import solarposition
from pvlib import tracking
def test_solar_noon():
apparent_zenith = pd.Series([10])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 10, 'surface_azimuth': 90,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_azimuth_north_south():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 60, 'tracker_theta': -60},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect['tracker_theta'] *= -1
assert_frame_equal(expect, tracker_data)
def test_max_angle():
apparent_zenith = pd.Series([60])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=45, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 15, 'surface_azimuth': 90,
'surface_tilt': 45, 'tracker_theta': 45},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_backtrack():
apparent_zenith = pd.Series([80])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=False,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 90,
'surface_tilt': 80, 'tracker_theta': 80},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 52.5716, 'surface_azimuth': 90,
'surface_tilt': 27.42833, 'tracker_theta': 27.4283},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_tilt():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=180,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=30, axis_azimuth=0,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 47.6632, 'surface_azimuth': 50.96969,
'surface_tilt': 42.5152, 'tracker_theta': 31.6655},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_axis_azimuth():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 30, 'surface_azimuth': 180,
'surface_tilt': 0, 'tracker_theta': 0},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
expect = pd.DataFrame({'aoi': 0, 'surface_azimuth': 180,
'surface_tilt': 30, 'tracker_theta': 30},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
@raises(ValueError)
def test_index_mismatch():
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([90,180])
tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
axis_tilt=0, axis_azimuth=90,
max_angle=90, backtrack=True,
gcr=2.0/7.0)
def test_SingleAxisTracker_creation():
system = tracking.SingleAxisTracker(max_angle=45,
gcr=.25,
module='blah',
inverter='blarg')
assert system.max_angle == 45
assert system.gcr == .25
assert system.module == 'blah'
assert system.inverter == 'blarg'
def test_SingleAxisTracker_tracking():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
apparent_zenith = pd.Series([30])
apparent_azimuth = pd.Series([135])
tracker_data = system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 7.286245, 'surface_azimuth': 142.65730 ,
'surface_tilt': 35.98741, 'tracker_theta': -20.88121},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
### results calculated using PVsyst
pvsyst_solar_azimuth = 7.1609
pvsyst_solar_height = 27.315
pvsyst_axis_tilt = 20.
pvsyst_axis_azimuth = 20.
pvsyst_system = tracking.SingleAxisTracker(max_angle=60.,
axis_tilt=pvsyst_axis_tilt,
axis_azimuth=180+pvsyst_axis_azimuth,
backtrack=False)
    # the definition of azimuth is different from PVsyst
apparent_azimuth = pd.Series([180+pvsyst_solar_azimuth])
apparent_zenith = pd.Series([90-pvsyst_solar_height])
tracker_data = pvsyst_system.singleaxis(apparent_zenith, apparent_azimuth)
expect = pd.DataFrame({'aoi': 41.07852 , 'surface_azimuth': 180-18.432 ,
'surface_tilt': 24.92122 , 'tracker_theta': -15.18391},
index=[0], dtype=np.float64)
assert_frame_equal(expect, tracker_data)
def test_LocalizedSingleAxisTracker_creation():
localized_system = tracking.LocalizedSingleAxisTracker(latitude=32,
longitude=-111,
module='blah',
inverter='blarg')
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
localized_system = system.localize(latitude=32, longitude=-111)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_SingleAxisTracker_localize_location():
system = tracking.SingleAxisTracker(max_angle=45, gcr=.25,
module='blah', inverter='blarg')
location = Location(latitude=32, longitude=-111)
localized_system = system.localize(location=location)
assert localized_system.module == 'blah'
assert localized_system.inverter == 'blarg'
assert localized_system.latitude == 32
assert localized_system.longitude == -111
def test_get_irradiance():
system = tracking.SingleAxisTracker(max_angle=90, axis_tilt=30,
axis_azimuth=180, gcr=2.0/7.0,
backtrack=True)
times = pd.DatetimeIndex(start='20160101 1200-0700',
end='20160101 1800-0700', freq='6H')
location = Location(latitude=32, longitude=-111)
solar_position = location.get_solarposition(times)
irrads = pd.DataFrame({'dni':[900,0], 'ghi':[600,0], 'dhi':[100,0]},
index=times)
solar_zenith = solar_position['apparent_zenith']
solar_azimuth = solar_position['azimuth']
tracker_data = system.singleaxis(solar_zenith, solar_azimuth)
irradiance = system.get_irradiance(irrads['dni'],
irrads['ghi'],
irrads['dhi'],
solar_zenith=solar_zenith,
solar_azimuth=solar_azimuth,
surface_tilt=tracker_data['surface_tilt'],
surface_azimuth=tracker_data['surface_azimuth'])
expected = pd.DataFrame(data=np.array(
[[ 961.80070, 815.94490, 145.85580, 135.32820,
10.52757492],
[ nan, nan, nan, nan,
nan]]),
columns=['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=times)
irradiance = np.round(irradiance, 4)
expected = np.round(expected, 4)
assert_frame_equal(irradiance, expected)
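def test_angles_stay_within_max_angle():
    # Hypothetical additional check (added for illustration, not part of the
    # original test suite): for a sweep of sun positions the returned rotation
    # must respect max_angle and the angle of incidence must be non-negative.
    apparent_zenith = pd.Series([30.0, 50.0, 70.0, 85.0])
    apparent_azimuth = pd.Series([90.0, 100.0, 260.0, 270.0])
    tracker_data = tracking.singleaxis(apparent_zenith, apparent_azimuth,
                                       axis_tilt=0, axis_azimuth=0,
                                       max_angle=45, backtrack=False,
                                       gcr=2.0/7.0)
    assert (tracker_data['tracker_theta'].abs() <= 45).all()
    assert (tracker_data['aoi'] >= 0).all()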
| bsd-3-clause |
bowang/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 42 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
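  # --- Illustrative invocation (added; not part of the original script) -----
  # A hypothetical single-worker run; the flag names mirror the argparse
  # definitions above, while the gRPC address and directories are invented:
  #
  #   python census_widendeep.py \
  #       --data_dir=/tmp/census-data \
  #       --model_dir=/tmp/census_wide_and_deep_model \
  #       --master_grpc_url=grpc://localhost:2222 \
  #       --num_parameter_servers=2 \
  #       --worker_index=0 \
  #       --train_steps=1000 \
  #       --eval_steps=1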
| apache-2.0 |
clemkoa/scikit-learn | sklearn/cluster/affinity_propagation_.py | 15 | 13973 | """Affinity Propagation clustering algorithm."""
# Author: Alexandre Gramfort [email protected]
# Gael Varoquaux [email protected]
# License: BSD 3 clause
import numpy as np
import warnings
from sklearn.exceptions import ConvergenceWarning
from ..base import BaseEstimator, ClusterMixin
from ..utils import as_float_array, check_array
from ..utils.validation import check_is_fitted
from ..metrics import euclidean_distances
from ..metrics import pairwise_distances_argmin
def _equal_similarities_and_preferences(S, preference):
def all_equal_preferences():
return np.all(preference == preference.flat[0])
def all_equal_similarities():
# Create mask to ignore diagonal of S
mask = np.ones(S.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return np.all(S[mask].flat == S[mask].flat[0])
return all_equal_preferences() and all_equal_similarities()
def affinity_propagation(S, preference=None, convergence_iter=15, max_iter=200,
damping=0.5, copy=True, verbose=False,
return_n_iter=False):
"""Perform Affinity Propagation Clustering of data
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
S : array-like, shape (n_samples, n_samples)
Matrix of similarities between points
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number of
exemplars, i.e. of clusters, is influenced by the input preferences
value. If the preferences are not passed as arguments, they will be
set to the median of the input similarities (resulting in a moderate
number of clusters). For a smaller amount of clusters, this can be set
to the minimum value of the similarities.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
max_iter : int, optional, default: 200
Maximum number of iterations
damping : float, optional, default: 0.5
Damping factor between 0.5 and 1.
copy : boolean, optional, default: True
If copy is False, the affinity matrix is modified inplace by the
algorithm, for memory efficiency
verbose : boolean, optional, default: False
The verbosity level
return_n_iter : bool, default False
Whether or not to return the number of iterations.
Returns
-------
cluster_centers_indices : array, shape (n_clusters,)
index of clusters centers
labels : array, shape (n_samples,)
cluster labels for each point
n_iter : int
number of iterations run. Returned only if `return_n_iter` is
set to True.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
When the algorithm does not converge, it returns an empty array as
``cluster_center_indices`` and ``-1`` as label for each training sample.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, a single cluster center
and label ``0`` for every sample will be returned. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
S = as_float_array(S, copy=copy)
n_samples = S.shape[0]
if S.shape[0] != S.shape[1]:
raise ValueError("S must be a square array (shape=%s)" % repr(S.shape))
if preference is None:
preference = np.median(S)
if damping < 0.5 or damping >= 1:
raise ValueError('damping must be >= 0.5 and < 1')
preference = np.array(preference)
if (n_samples == 1 or
_equal_similarities_and_preferences(S, preference)):
# It makes no sense to run the algorithm in this case, so return 1 or
# n_samples clusters, depending on preferences
warnings.warn("All samples have mutually equal similarities. "
"Returning arbitrary cluster center(s).")
if preference.flat[0] >= S.flat[n_samples - 1]:
return ((np.arange(n_samples), np.arange(n_samples), 0)
if return_n_iter
else (np.arange(n_samples), np.arange(n_samples)))
else:
return ((np.array([0]), np.array([0] * n_samples), 0)
if return_n_iter
else (np.array([0]), np.array([0] * n_samples)))
random_state = np.random.RandomState(0)
# Place preference on the diagonal of S
S.flat[::(n_samples + 1)] = preference
A = np.zeros((n_samples, n_samples))
R = np.zeros((n_samples, n_samples)) # Initialize messages
# Intermediate results
tmp = np.zeros((n_samples, n_samples))
# Remove degeneracies
S += ((np.finfo(np.double).eps * S + np.finfo(np.double).tiny * 100) *
random_state.randn(n_samples, n_samples))
# Execute parallel affinity propagation updates
e = np.zeros((n_samples, convergence_iter))
ind = np.arange(n_samples)
for it in range(max_iter):
# tmp = A + S; compute responsibilities
np.add(A, S, tmp)
I = np.argmax(tmp, axis=1)
Y = tmp[ind, I] # np.max(A + S, axis=1)
tmp[ind, I] = -np.inf
Y2 = np.max(tmp, axis=1)
# tmp = Rnew
np.subtract(S, Y[:, None], tmp)
tmp[ind, I] = S[ind, I] - Y2
# Damping
tmp *= 1 - damping
R *= damping
R += tmp
# tmp = Rp; compute availabilities
np.maximum(R, 0, tmp)
tmp.flat[::n_samples + 1] = R.flat[::n_samples + 1]
# tmp = -Anew
tmp -= np.sum(tmp, axis=0)
dA = np.diag(tmp).copy()
tmp.clip(0, np.inf, tmp)
tmp.flat[::n_samples + 1] = dA
# Damping
tmp *= 1 - damping
A *= damping
A -= tmp
# Check for convergence
E = (np.diag(A) + np.diag(R)) > 0
e[:, it % convergence_iter] = E
K = np.sum(E, axis=0)
if it >= convergence_iter:
se = np.sum(e, axis=1)
unconverged = (np.sum((se == convergence_iter) + (se == 0))
!= n_samples)
if (not unconverged and (K > 0)) or (it == max_iter):
if verbose:
print("Converged after %d iterations." % it)
break
else:
if verbose:
print("Did not converge")
I = np.flatnonzero(E)
K = I.size # Identify exemplars
if K > 0:
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K) # Identify clusters
# Refine the final set of exemplars and clusters and return results
for k in range(K):
ii = np.where(c == k)[0]
j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
I[k] = ii[j]
c = np.argmax(S[:, I], axis=1)
c[I] = np.arange(K)
labels = I[c]
# Reduce labels to a sorted, gapless, list
cluster_centers_indices = np.unique(labels)
labels = np.searchsorted(cluster_centers_indices, labels)
else:
warnings.warn("Affinity propagation did not converge, this model "
"will not have any cluster centers.", ConvergenceWarning)
labels = np.array([-1] * n_samples)
cluster_centers_indices = []
if return_n_iter:
return cluster_centers_indices, labels, it + 1
else:
return cluster_centers_indices, labels
###############################################################################
class AffinityPropagation(BaseEstimator, ClusterMixin):
"""Perform Affinity Propagation Clustering of data.
Read more in the :ref:`User Guide <affinity_propagation>`.
Parameters
----------
damping : float, optional, default: 0.5
Damping factor (between 0.5 and 1) is the extent to
which the current value is maintained relative to
incoming values (weighted 1 - damping). This in order
to avoid numerical oscillations when updating these
values (messages).
max_iter : int, optional, default: 200
Maximum number of iterations.
convergence_iter : int, optional, default: 15
Number of iterations with no change in the number
of estimated clusters that stops the convergence.
copy : boolean, optional, default: True
Make a copy of input data.
preference : array-like, shape (n_samples,) or float, optional
Preferences for each point - points with larger values of
preferences are more likely to be chosen as exemplars. The number
        of exemplars, i.e. of clusters, is influenced by the input
preferences value. If the preferences are not passed as arguments,
they will be set to the median of the input similarities.
affinity : string, optional, default=``euclidean``
Which affinity to use. At the moment ``precomputed`` and
``euclidean`` are supported. ``euclidean`` uses the
negative squared euclidean distance between points.
verbose : boolean, optional, default: False
Whether to be verbose.
Attributes
----------
cluster_centers_indices_ : array, shape (n_clusters,)
Indices of cluster centers
cluster_centers_ : array, shape (n_clusters, n_features)
Cluster centers (if affinity != ``precomputed``).
labels_ : array, shape (n_samples,)
Labels of each point
affinity_matrix_ : array, shape (n_samples, n_samples)
Stores the affinity matrix used in ``fit``.
n_iter_ : int
Number of iterations taken to converge.
Notes
-----
For an example, see :ref:`examples/cluster/plot_affinity_propagation.py
<sphx_glr_auto_examples_cluster_plot_affinity_propagation.py>`.
The algorithmic complexity of affinity propagation is quadratic
in the number of points.
When ``fit`` does not converge, ``cluster_centers_`` becomes an empty
array and all training samples will be labelled as ``-1``. In addition,
``predict`` will then label every sample as ``-1``.
When all training samples have equal similarities and equal preferences,
the assignment of cluster centers and labels depends on the preference.
If the preference is smaller than the similarities, ``fit`` will result in
a single cluster center and label ``0`` for every sample. Otherwise, every
training sample becomes its own cluster center and is assigned a unique
label.
References
----------
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
def __init__(self, damping=.5, max_iter=200, convergence_iter=15,
copy=True, preference=None, affinity='euclidean',
verbose=False):
self.damping = damping
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.copy = copy
self.verbose = verbose
self.preference = preference
self.affinity = affinity
@property
def _pairwise(self):
return self.affinity == "precomputed"
def fit(self, X, y=None):
""" Create affinity matrix from negative euclidean distances, then
apply affinity propagation clustering.
Parameters
----------
X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
Data matrix or, if affinity is ``precomputed``, matrix of
similarities / affinities.
y : Ignored
"""
X = check_array(X, accept_sparse='csr')
if self.affinity == "precomputed":
self.affinity_matrix_ = X
elif self.affinity == "euclidean":
self.affinity_matrix_ = -euclidean_distances(X, squared=True)
else:
raise ValueError("Affinity must be 'precomputed' or "
"'euclidean'. Got %s instead"
% str(self.affinity))
self.cluster_centers_indices_, self.labels_, self.n_iter_ = \
affinity_propagation(
self.affinity_matrix_, self.preference, max_iter=self.max_iter,
convergence_iter=self.convergence_iter, damping=self.damping,
copy=self.copy, verbose=self.verbose, return_n_iter=True)
if self.affinity != "precomputed":
self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : array, shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_indices_")
if not hasattr(self, "cluster_centers_"):
raise ValueError("Predict method is not supported when "
"affinity='precomputed'.")
if self.cluster_centers_.size > 0:
return pairwise_distances_argmin(X, self.cluster_centers_)
else:
warnings.warn("This model does not have any cluster centers "
"because affinity propagation did not converge. "
"Labeling every sample as '-1'.")
return np.array([-1] * X.shape[0])
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/tests/test_nanops.py | 2 | 42408 | # -*- coding: utf-8 -*-
from __future__ import division, print_function
from functools import partial
import pytest
import warnings
import numpy as np
import pandas as pd
from pandas import Series, isna, _np_version_under1p9
from pandas.core.dtypes.common import is_integer_dtype
import pandas.core.nanops as nanops
import pandas.util.testing as tm
use_bn = nanops._USE_BOTTLENECK
class TestnanopsDataFrame(object):
def setup_method(self, method):
np.random.seed(11235)
nanops._USE_BOTTLENECK = False
self.arr_shape = (11, 7, 5)
self.arr_float = np.random.randn(*self.arr_shape)
self.arr_float1 = np.random.randn(*self.arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.randint(-10, 10, self.arr_shape)
self.arr_bool = np.random.randint(0, 2, self.arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype('S')
self.arr_utf = np.abs(self.arr_float).astype('U')
self.arr_date = np.random.randint(0, 20000,
self.arr_shape).astype('M8[ns]')
self.arr_tdelta = np.random.randint(0, 20000,
self.arr_shape).astype('m8[ns]')
self.arr_nan = np.tile(np.nan, self.arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_float1_inf = np.vstack([self.arr_float1, self.arr_inf])
self.arr_inf_float1 = np.vstack([self.arr_inf, self.arr_float1])
self.arr_inf_inf = np.vstack([self.arr_inf, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan,
self.arr_inf])
self.arr_nan_float1_inf = np.vstack([self.arr_float, self.arr_inf,
self.arr_nan])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan,
self.arr_inf])
self.arr_obj = np.vstack([self.arr_float.astype(
'O'), self.arr_int.astype('O'), self.arr_bool.astype(
'O'), self.arr_complex.astype('O'), self.arr_str.astype(
'O'), self.arr_utf.astype('O'), self.arr_date.astype('O'),
self.arr_tdelta.astype('O')])
with np.errstate(invalid='ignore'):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex,
self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex,
self.arr_nan_infj])
self.arr_float_2d = self.arr_float[:, :, 0]
self.arr_float1_2d = self.arr_float1[:, :, 0]
self.arr_complex_2d = self.arr_complex[:, :, 0]
self.arr_int_2d = self.arr_int[:, :, 0]
self.arr_bool_2d = self.arr_bool[:, :, 0]
self.arr_str_2d = self.arr_str[:, :, 0]
self.arr_utf_2d = self.arr_utf[:, :, 0]
self.arr_date_2d = self.arr_date[:, :, 0]
self.arr_tdelta_2d = self.arr_tdelta[:, :, 0]
self.arr_nan_2d = self.arr_nan[:, :, 0]
self.arr_float_nan_2d = self.arr_float_nan[:, :, 0]
self.arr_float1_nan_2d = self.arr_float1_nan[:, :, 0]
self.arr_nan_float1_2d = self.arr_nan_float1[:, :, 0]
self.arr_nan_nan_2d = self.arr_nan_nan[:, :, 0]
self.arr_nan_nanj_2d = self.arr_nan_nanj[:, :, 0]
self.arr_complex_nan_2d = self.arr_complex_nan[:, :, 0]
self.arr_inf_2d = self.arr_inf[:, :, 0]
self.arr_float_inf_2d = self.arr_float_inf[:, :, 0]
self.arr_nan_inf_2d = self.arr_nan_inf[:, :, 0]
self.arr_float_nan_inf_2d = self.arr_float_nan_inf[:, :, 0]
self.arr_nan_nan_inf_2d = self.arr_nan_nan_inf[:, :, 0]
self.arr_float_1d = self.arr_float[:, 0, 0]
self.arr_float1_1d = self.arr_float1[:, 0, 0]
self.arr_complex_1d = self.arr_complex[:, 0, 0]
self.arr_int_1d = self.arr_int[:, 0, 0]
self.arr_bool_1d = self.arr_bool[:, 0, 0]
self.arr_str_1d = self.arr_str[:, 0, 0]
self.arr_utf_1d = self.arr_utf[:, 0, 0]
self.arr_date_1d = self.arr_date[:, 0, 0]
self.arr_tdelta_1d = self.arr_tdelta[:, 0, 0]
self.arr_nan_1d = self.arr_nan[:, 0, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0, 0]
self.arr_nan_nan_1d = self.arr_nan_nan[:, 0, 0]
self.arr_nan_nanj_1d = self.arr_nan_nanj[:, 0, 0]
self.arr_complex_nan_1d = self.arr_complex_nan[:, 0, 0]
self.arr_inf_1d = self.arr_inf.ravel()
self.arr_float_inf_1d = self.arr_float_inf[:, 0, 0]
self.arr_nan_inf_1d = self.arr_nan_inf[:, 0, 0]
self.arr_float_nan_inf_1d = self.arr_float_nan_inf[:, 0, 0]
self.arr_nan_nan_inf_1d = self.arr_nan_nan_inf[:, 0, 0]
def teardown_method(self, method):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, 'asm8', res)
res = getattr(res, 'values', res)
# timedeltas are a beast here
def _coerce_tds(targ, res):
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
if len(targ) == 1:
targ = targ[0].item()
res = res.item()
else:
targ = targ.view('i8')
return targ, res
try:
if axis != 0 and hasattr(
targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except:
# handle timedelta dtypes
if hasattr(targ, 'dtype') and targ.dtype == 'm8[ns]':
targ, res = _coerce_tds(targ, res)
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
return
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, 'dtype') or res.dtype.kind not in ['c', 'O']:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == 'O':
if targ.dtype.kind != 'O':
res = res.astype(targ.dtype)
else:
try:
res = res.astype('c16')
except:
res = res.astype('f8')
try:
targ = targ.astype('c16')
except:
targ = targ.astype('f8')
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == 'O':
raise
tm.assert_almost_equal(targ.real, res.real,
check_dtype=check_dtype)
tm.assert_almost_equal(targ.imag, res.imag,
check_dtype=check_dtype)
def check_fun_data(self, testfunc, targfunc, testarval, targarval,
targarnanval, check_dtype=True, **kwargs):
for axis in list(range(targarval.ndim)) + [None]:
for skipna in [False, True]:
targartempval = targarval if skipna else targarnanval
try:
targ = targfunc(targartempval, axis=axis, **kwargs)
res = testfunc(testarval, axis=axis, skipna=skipna,
**kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis,
check_dtype=check_dtype)
except BaseException as exc:
exc.args += ('axis: %s of %s' % (axis, testarval.ndim - 1),
'skipna: %s' % skipna, 'kwargs: %s' % kwargs)
raise
if testarval.ndim <= 1:
return
try:
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
targarnanval2 = np.take(targarnanval, 0, axis=-1)
except ValueError:
return
self.check_fun_data(testfunc, targfunc, testarval2, targarval2,
targarnanval2, check_dtype=check_dtype, **kwargs)
def check_fun(self, testfunc, targfunc, testar, targar=None,
targarnan=None, **kwargs):
if targar is None:
targar = testar
if targarnan is None:
targarnan = testar
testarval = getattr(self, testar)
targarval = getattr(self, targar)
targarnanval = getattr(self, targarnan)
try:
self.check_fun_data(testfunc, targfunc, testarval, targarval,
targarnanval, **kwargs)
except BaseException as exc:
exc.args += ('testar: %s' % testar, 'targar: %s' % targar,
'targarnan: %s' % targarnan)
raise
def check_funs(self, testfunc, targfunc, allow_complex=True,
allow_all_nan=True, allow_str=True, allow_date=True,
allow_tdelta=True, allow_obj=True, **kwargs):
self.check_fun(testfunc, targfunc, 'arr_float', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_float_nan', 'arr_float',
**kwargs)
self.check_fun(testfunc, targfunc, 'arr_int', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_bool', **kwargs)
objs = [self.arr_float.astype('O'), self.arr_int.astype('O'),
self.arr_bool.astype('O')]
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan', **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, 'arr_complex', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_complex_nan',
'arr_complex', **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, 'arr_nan_nanj', **kwargs)
objs += [self.arr_complex.astype('O')]
if allow_str:
self.check_fun(testfunc, targfunc, 'arr_str', **kwargs)
self.check_fun(testfunc, targfunc, 'arr_utf', **kwargs)
objs += [self.arr_str.astype('O'), self.arr_utf.astype('O')]
if allow_date:
try:
targfunc(self.arr_date)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_date', **kwargs)
objs += [self.arr_date.astype('O')]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, 'arr_tdelta', **kwargs)
objs += [self.arr_tdelta.astype('O')]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == 'convert':
targfunc = partial(self._badobj_wrap, func=targfunc,
allow_complex=allow_complex)
self.check_fun(testfunc, targfunc, 'arr_obj', **kwargs)
def check_funs_ddof(self,
testfunc,
targfunc,
allow_complex=True,
allow_all_nan=True,
allow_str=True,
allow_date=False,
allow_tdelta=False,
allow_obj=True, ):
for ddof in range(3):
try:
self.check_funs(testfunc, targfunc, allow_complex,
allow_all_nan, allow_str, allow_date,
allow_tdelta, allow_obj, ddof=ddof)
except BaseException as exc:
exc.args += ('ddof %s' % ddof, )
raise
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == 'O':
if allow_complex:
value = value.astype('c16')
else:
value = value.astype('f8')
return func(value, **kwargs)
def test_nanany(self):
self.check_funs(nanops.nanany, np.any, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nanall(self):
self.check_funs(nanops.nanall, np.all, allow_all_nan=False,
allow_str=False, allow_date=False, allow_tdelta=False)
def test_nansum(self):
self.check_funs(nanops.nansum, np.sum, allow_str=False,
allow_date=False, allow_tdelta=True, check_dtype=False)
def test_nanmean(self):
self.check_funs(nanops.nanmean, np.mean, allow_complex=False,
allow_obj=False, allow_str=False, allow_date=False,
allow_tdelta=True)
def test_nanmean_overflow(self):
# GH 10155
        # In the previous implementation, mean could overflow for int dtypes;
        # it is now consistent with numpy
# numpy < 1.9.0 is not computing this correctly
if not _np_version_under1p9:
for a in [2 ** 55, -2 ** 55, 20150515061816532]:
s = Series(a, index=range(500), dtype=np.int64)
result = s.mean()
np_result = s.values.mean()
assert result == a
assert result == np_result
assert result.dtype == np.float64
def test_returned_dtype(self):
dtypes = [np.int16, np.int32, np.int64, np.float32, np.float64]
if hasattr(np, 'float128'):
dtypes.append(np.float128)
for dtype in dtypes:
s = Series(range(10), dtype=dtype)
group_a = ['mean', 'std', 'var', 'skew', 'kurt']
group_b = ['min', 'max']
for method in group_a + group_b:
result = getattr(s, method)()
if is_integer_dtype(dtype) and method in group_a:
assert result.dtype == np.float64
else:
assert result.dtype == dtype
def test_nanmedian(self):
with warnings.catch_warnings(record=True):
self.check_funs(nanops.nanmedian, np.median, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nanstd(self):
self.check_funs_ddof(nanops.nanstd, np.std, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=True, allow_obj='convert')
def test_nansem(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import sem
with np.errstate(invalid='ignore'):
self.check_funs_ddof(nanops.nansem, sem, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False, allow_obj='convert')
def _minmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
if res.dtype.kind == 'm':
res = np.atleast_1d(res)
return res
def test_nanmin(self):
func = partial(self._minmax_wrap, func=np.min)
self.check_funs(nanops.nanmin, func, allow_str=False, allow_obj=False)
def test_nanmax(self):
func = partial(self._minmax_wrap, func=np.max)
self.check_funs(nanops.nanmax, func, allow_str=False, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, 'all') and nullnan.all() or
not hasattr(nullnan, 'all') and nullnan):
res = -1
return res
def test_nanargmax(self):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, allow_str=False,
allow_obj=False, allow_date=True, allow_tdelta=True)
def test_nanargmin(self):
func = partial(self._argminmax_wrap, func=np.argmin)
if tm.sys.version_info[0:2] == (2, 6):
self.check_funs(nanops.nanargmin, func, allow_date=True,
allow_tdelta=True, allow_str=False,
allow_obj=False)
else:
self.check_funs(nanops.nanargmin, func, allow_str=False,
allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype('f8')
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.
return result
def test_nanskew(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import skew
func = partial(self._skew_kurt_wrap, func=skew)
with np.errstate(invalid='ignore'):
self.check_funs(nanops.nanskew, func, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False)
def test_nankurt(self):
tm.skip_if_no_package('scipy', min_version='0.17.0')
from scipy.stats import kurtosis
func1 = partial(kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid='ignore'):
self.check_funs(nanops.nankurt, func, allow_complex=False,
allow_str=False, allow_date=False,
allow_tdelta=False)
def test_nanprod(self):
self.check_funs(nanops.nanprod, np.prod, allow_str=False,
allow_date=False, allow_tdelta=False)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
**kwargs)
res11 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
**kwargs)
res24 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1, **kwargs)
res25 = checkfun(self.arr_float_2d, self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
**kwargs)
res11 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
**kwargs)
res24 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1, **kwargs)
res25 = checkfun(self.arr_float_1d, self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1, **kwargs)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat,
self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='pearson')
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat,
self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='pearson')
def test_nancorr_kendall(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import kendalltau
targ0 = kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='kendall')
targ0 = kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='kendall')
def test_nancorr_spearman(self):
tm.skip_if_no_package('scipy.stats')
from scipy.stats import spearmanr
targ0 = spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1,
method='spearman')
targ0 = spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1,
method='spearman')
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
def check_nancomp(self, checkfun, targ0):
arr_float = self.arr_float
arr_float1 = self.arr_float1
arr_nan = self.arr_nan
arr_nan_nan = self.arr_nan_nan
arr_float_nan = self.arr_float_nan
arr_float1_nan = self.arr_float1_nan
arr_nan_float1 = self.arr_nan_float1
while targ0.ndim:
try:
res0 = checkfun(arr_float, arr_float1)
tm.assert_almost_equal(targ0, res0)
if targ0.ndim > 1:
targ1 = np.vstack([targ0, arr_nan])
else:
targ1 = np.hstack([targ0, arr_nan])
res1 = checkfun(arr_float_nan, arr_float1_nan)
tm.assert_numpy_array_equal(targ1, res1, check_dtype=False)
targ2 = arr_nan_nan
res2 = checkfun(arr_float_nan, arr_nan_float1)
tm.assert_numpy_array_equal(targ2, res2, check_dtype=False)
except Exception as exc:
exc.args += ('ndim: %s' % arr_float.ndim, )
raise
try:
arr_float = np.take(arr_float, 0, axis=-1)
arr_float1 = np.take(arr_float1, 0, axis=-1)
arr_nan = np.take(arr_nan, 0, axis=-1)
arr_nan_nan = np.take(arr_nan_nan, 0, axis=-1)
arr_float_nan = np.take(arr_float_nan, 0, axis=-1)
arr_float1_nan = np.take(arr_float1_nan, 0, axis=-1)
arr_nan_float1 = np.take(arr_nan_float1, 0, axis=-1)
targ0 = np.take(targ0, 0, axis=-1)
except ValueError:
break
def test_nangt(self):
targ0 = self.arr_float > self.arr_float1
self.check_nancomp(nanops.nangt, targ0)
def test_nange(self):
targ0 = self.arr_float >= self.arr_float1
self.check_nancomp(nanops.nange, targ0)
def test_nanlt(self):
targ0 = self.arr_float < self.arr_float1
self.check_nancomp(nanops.nanlt, targ0)
def test_nanle(self):
targ0 = self.arr_float <= self.arr_float1
self.check_nancomp(nanops.nanle, targ0)
def test_naneq(self):
targ0 = self.arr_float == self.arr_float1
self.check_nancomp(nanops.naneq, targ0)
def test_nanne(self):
targ0 = self.arr_float != self.arr_float1
self.check_nancomp(nanops.nanne, targ0)
def check_bool(self, func, value, correct, *args, **kwargs):
while getattr(value, 'ndim', True):
try:
res0 = func(value, *args, **kwargs)
if correct:
assert res0
else:
assert not res0
except BaseException as exc:
exc.args += ('dim: %s' % getattr(value, 'ndim', value), )
raise
if not hasattr(value, 'ndim'):
break
try:
value = np.take(value, 0, axis=-1)
except ValueError:
break
def test__has_infs(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', False),
('arr_nan_nanj', False), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', False),
('arr_float_nan', False), ('arr_nan_nan', False),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(nanops._has_infs, val, correct)
self.check_bool(nanops._has_infs, val.astype('f4'), correct)
self.check_bool(nanops._has_infs, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__isfinite(self):
pairs = [('arr_complex', False), ('arr_int', False),
('arr_bool', False), ('arr_str', False), ('arr_utf', False),
('arr_complex', False), ('arr_complex_nan', True),
('arr_nan_nanj', True), ('arr_nan_infj', True),
('arr_complex_nan_infj', True)]
pairs_float = [('arr_float', False), ('arr_nan', True),
('arr_float_nan', True), ('arr_nan_nan', True),
('arr_float_inf', True), ('arr_inf', True),
('arr_nan_inf', True), ('arr_float_nan_inf', True),
('arr_nan_nan_inf', True)]
func1 = lambda x: np.any(nanops._isfinite(x).ravel())
# TODO: unused?
# func2 = lambda x: np.any(nanops._isfinite(x).values.ravel())
for arr, correct in pairs:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
except BaseException as exc:
exc.args += (arr, )
raise
for arr, correct in pairs_float:
val = getattr(self, arr)
try:
self.check_bool(func1, val, correct)
self.check_bool(func1, val.astype('f4'), correct)
self.check_bool(func1, val.astype('f2'), correct)
except BaseException as exc:
exc.args += (arr, )
raise
def test__bn_ok_dtype(self):
assert nanops._bn_ok_dtype(self.arr_float.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_complex.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_int.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_bool.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_str.dtype, 'test')
assert nanops._bn_ok_dtype(self.arr_utf.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_date.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_tdelta.dtype, 'test')
assert not nanops._bn_ok_dtype(self.arr_obj.dtype, 'test')
class TestEnsureNumeric(object):
def test_numeric_values(self):
# Test integer
assert nanops._ensure_numeric(1) == 1
# Test float
assert nanops._ensure_numeric(1.1) == 1.1
# Test complex
assert nanops._ensure_numeric(1 + 2j) == 1 + 2j
def test_ndarray(self):
# Test numeric ndarray
values = np.array([1, 2, 3])
assert np.allclose(nanops._ensure_numeric(values), values)
# Test object ndarray
o_values = values.astype(object)
assert np.allclose(nanops._ensure_numeric(o_values), values)
# Test convertible string ndarray
s_values = np.array(['1', '2', '3'], dtype=object)
assert np.allclose(nanops._ensure_numeric(s_values), values)
# Test non-convertible string ndarray
s_values = np.array(['foo', 'bar', 'baz'], dtype=object)
pytest.raises(ValueError, lambda: nanops._ensure_numeric(s_values))
def test_convertable_values(self):
assert np.allclose(nanops._ensure_numeric('1'), 1.0)
assert np.allclose(nanops._ensure_numeric('1.1'), 1.1)
assert np.allclose(nanops._ensure_numeric('1+1j'), 1 + 1j)
def test_non_convertable_values(self):
pytest.raises(TypeError, lambda: nanops._ensure_numeric('foo'))
pytest.raises(TypeError, lambda: nanops._ensure_numeric({}))
pytest.raises(TypeError, lambda: nanops._ensure_numeric([]))
class TestNanvarFixedValues(object):
# xref GH10242
def setup_method(self, method):
# Samples from a normal distribution.
self.variance = variance = 3.0
self.samples = self.prng.normal(scale=variance ** 0.5, size=100000)
def test_nanvar_all_finite(self):
samples = self.samples
actual_variance = nanops.nanvar(samples)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
def test_nanvar_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_variance = nanops.nanvar(samples, skipna=True)
tm.assert_almost_equal(actual_variance, self.variance,
check_less_precise=2)
actual_variance = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_variance, np.nan, check_less_precise=2)
def test_nanstd_nans(self):
samples = np.nan * np.ones(2 * self.samples.shape[0])
samples[::2] = self.samples
actual_std = nanops.nanstd(samples, skipna=True)
tm.assert_almost_equal(actual_std, self.variance ** 0.5,
check_less_precise=2)
actual_std = nanops.nanvar(samples, skipna=False)
tm.assert_almost_equal(actual_std, np.nan,
check_less_precise=2)
def test_nanvar_axis(self):
# Generate some sample data.
samples_norm = self.samples
samples_unif = self.prng.uniform(size=samples_norm.shape[0])
samples = np.vstack([samples_norm, samples_unif])
actual_variance = nanops.nanvar(samples, axis=1)
tm.assert_almost_equal(actual_variance, np.array(
[self.variance, 1.0 / 12]), check_less_precise=2)
def test_nanvar_ddof(self):
n = 5
samples = self.prng.uniform(size=(10000, n + 1))
samples[:, -1] = np.nan # Force use of our own algorithm.
variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean()
variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean()
variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean()
# The unbiased estimate.
var = 1.0 / 12
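        # For n valid samples, nanvar with ddof=d effectively computes
        #   sum((x - mean(x)) ** 2) / (n - d),
        # so relative to the unbiased ddof=1 estimate, ddof=0 shrinks the
        # result by (n - 1) / n and ddof=2 inflates it by (n - 1) / (n - 2);
        # var = 1/12 is the variance of a uniform(0, 1) sample.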
tm.assert_almost_equal(variance_1, var,
check_less_precise=2)
# The underestimated variance.
tm.assert_almost_equal(variance_0, (n - 1.0) / n * var,
check_less_precise=2)
# The overestimated variance.
tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var,
check_less_precise=2)
def test_ground_truth(self):
# Test against values that were precomputed with Numpy.
samples = np.empty((4, 4))
samples[:3, :3] = np.array([[0.97303362, 0.21869576, 0.55560287
], [0.72980153, 0.03109364, 0.99155171],
[0.09317602, 0.60078248, 0.15871292]])
samples[3] = samples[:, 3] = np.nan
# Actual variances along axis=0, 1 for ddof=0, 1, 2
variance = np.array([[[0.13762259, 0.05619224, 0.11568816
], [0.20643388, 0.08428837, 0.17353224],
[0.41286776, 0.16857673, 0.34706449]],
[[0.09519783, 0.16435395, 0.05082054
], [0.14279674, 0.24653093, 0.07623082],
[0.28559348, 0.49306186, 0.15246163]]])
# Test nanvar.
for axis in range(2):
for ddof in range(3):
var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(var[:3], variance[axis, ddof])
assert np.isnan(var[3])
# Test nanstd.
for axis in range(2):
for ddof in range(3):
std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof)
tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5)
assert np.isnan(std[3])
def test_nanstd_roundoff(self):
# Regression test for GH 10242 (test data taken from GH 10489). Ensure
# that variance is stable.
data = Series(766897346 * np.ones(10))
for ddof in range(3):
result = data.std(ddof=ddof)
assert result == 0.0
@property
def prng(self):
return np.random.RandomState(1234)
class TestNanskewFixedValues(object):
# xref GH 11974
def setup_method(self, method):
# Test data + skewness value (computed with scipy.stats.skew)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_skew = -0.1875895205961754
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
skew = nanops.nanskew(data)
assert skew == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nanskew(right_tailed) > 0
def test_ground_truth(self):
skew = nanops.nanskew(self.samples)
tm.assert_almost_equal(skew, self.actual_skew)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
skew = nanops.nanskew(samples, axis=1)
tm.assert_almost_equal(skew, np.array([self.actual_skew, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=False)
assert np.isnan(skew)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
skew = nanops.nanskew(samples, skipna=True)
tm.assert_almost_equal(skew, self.actual_skew)
@property
def prng(self):
return np.random.RandomState(1234)
class TestNankurtFixedValues(object):
# xref GH 11974
def setup_method(self, method):
# Test data + kurtosis value (computed with scipy.stats.kurtosis)
self.samples = np.sin(np.linspace(0, 1, 200))
self.actual_kurt = -1.2058303433799713
def test_constant_series(self):
# xref GH 11974
for val in [3075.2, 3075.3, 3075.5]:
data = val * np.ones(300)
kurt = nanops.nankurt(data)
assert kurt == 0.0
def test_all_finite(self):
alpha, beta = 0.3, 0.1
left_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(left_tailed) < 0
alpha, beta = 0.1, 0.3
right_tailed = self.prng.beta(alpha, beta, size=100)
assert nanops.nankurt(right_tailed) > 0
def test_ground_truth(self):
kurt = nanops.nankurt(self.samples)
tm.assert_almost_equal(kurt, self.actual_kurt)
def test_axis(self):
samples = np.vstack([self.samples,
np.nan * np.ones(len(self.samples))])
kurt = nanops.nankurt(samples, axis=1)
tm.assert_almost_equal(kurt, np.array([self.actual_kurt, np.nan]))
def test_nans(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=False)
assert np.isnan(kurt)
def test_nans_skipna(self):
samples = np.hstack([self.samples, np.nan])
kurt = nanops.nankurt(samples, skipna=True)
tm.assert_almost_equal(kurt, self.actual_kurt)
@property
def prng(self):
return np.random.RandomState(1234)
def test_use_bottleneck():
if nanops._BOTTLENECK_INSTALLED:
pd.set_option('use_bottleneck', True)
assert pd.get_option('use_bottleneck')
pd.set_option('use_bottleneck', False)
assert not pd.get_option('use_bottleneck')
pd.set_option('use_bottleneck', use_bn)
| gpl-2.0 |
MattDerry/pendulum_3d | src/pendulum_3d_simulator_corr_inv.py | 1 | 30386 | #!/usr/bin/env python
"""
Matt Derry
Summer 2014
Simulate spherical inverted pendulum and publish results so that
ROS can be visualization.
"""
## define all imports:
import roslib
roslib.load_manifest('pendulum_3d')
import rospy
import tf
from sensor_msgs.msg import JointState as JS
from pendulum_3d.msg import *
import std_srvs.srv as SS
import geometry_msgs.msg as GM
from visualization_msgs.msg import Marker
from sensor_msgs.msg import Joy
import trep
from trep import discopt
from math import pi, fmod, atan2, sin, exp
import numpy as np
import scipy.signal as sp
import matplotlib as mpl
from dynamic_reconfigure.server import Server as DynamicReconfigureServer
from pendulum_3d.cfg import PendulumConfig
import csv
import sys
import time
#####################
# Experiment Settings
#####################
USER_NUMBER = 1
MAX_TRIALS = 10
STARTING_TRUST = 0.3
MAX_TORQUE = 100.0
INPUT_DEVICE = "balance_board" # "joystick"
ALPHA = 0.05
EXPONENT = -0.0001
MAX_COST = 200000
MIN_COST = 50000
####################
# GLOBAL VARIABLES #
####################
DT = 1/50.
tf_freq = 30.
TIMESTEP = 20 # in ms
EXCEPTION_COUNT_MAX = 5
DISTURBANCE_MAGNITUDE = 2.0
# Pendulum params:
MAX_LINKS = 2
# Weight limits:
MAX_WEIGHT = 1000000000
MIN_WEIGHT = 0.0001
KIN_WEIGHT = 1.0
# Trust limits:
MAX_TRUST = 1.0
MIN_TRUST = 0.0
CURRENT_TRUST = 0.0
# Saturation:
MAX_VELOCITY = 0.25/(TIMESTEP/1000.0)
SATURATION_TORQUE = 1000.0
MIN_SCALE = 0.1
MAX_SCALE = 2.0
STATE_INTERACTIVE = 0
STATE_CONTROLLED_INTERACTIVE = 1
QBAR_END = pi
QBAR_DIP = pi/8
QBAR_DIP1 = QBAR_DIP
QBAR_DIP2 = QBAR_DIP
TS_TRANSITION_1 = 200
TS_TRANSITION_2 = 400
CURRENT_TRIAL_NUMBER = 0
###########################
# MISCELLANEOUS FUNCTIONS #
###########################
# map to 0 to 2pi
def normalize_angle_positive(angle):
return fmod(fmod(angle, 2.0*pi) + 2.0*pi, 2.0*pi)
# map to -pi to +pi
def normalize_angle(angle):
a = normalize_angle_positive(angle)
if a > pi:
a -= 2.0*pi
return a
# utility function for getting minimum angular distance between two angles in
# radians. Answer will be -pi <= result <= pi, adding result to 'before' will
# always be an angle equivalent to 'after'
def shortest_angular_distance(before, after):
result = normalize_angle_positive(normalize_angle_positive(after) -
normalize_angle_positive(before))
if result > pi:
result = -(2.0*pi-result)
return normalize_angle(result)
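# Illustrative values (a sketch, assuming angles in radians):
#   normalize_angle_positive(-pi/2)             -> 3*pi/2
#   normalize_angle(3*pi/2)                     -> -pi/2
#   shortest_angular_distance(0.1, 2*pi - 0.1)  -> approximately -0.2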
class User:
def __init__(self, user_number, max_number_of_trials, starting_trust, trial_alpha, input_device):
self.user_id = user_number
self.max_num_trials = max_number_of_trials
self.current_trial_num = 0
self.starting_trust = starting_trust
self.input_device = input_device
self.current_trust = starting_trust
self.alpha = trial_alpha
param_file_name = '/home/mderry/cps_data/user_' + str(user_number) + '/user_' + str(user_number) + '_params.csv'
log_file_name = '/home/mderry/cps_data/user_' + str(user_number) + '/user_' + str(user_number) + '_trust_log.csv'
with open(param_file_name, 'wb') as csvfile:
paramwriter = csv.writer(csvfile, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
paramwriter.writerow(['User Number', 'Input Device', 'Max_Num_Trials', 'Starting Trust', 'Alpha'])
paramwriter.writerow([str(user_number), input_device, str(max_number_of_trials), str(starting_trust), str(trial_alpha)])
self.trust_log_file = open(log_file_name, 'wb')
self.log_writer = csv.writer(self.trust_log_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_NONE)
self.log_writer.writerow(['Trial Number', 'Current Trust', 'Raw Task Trust', 'New Trust', 'Raw Task Cost'])
self.write_to_log(0, starting_trust, 0, starting_trust, 0) # no single task trust yet, so just set to zero for now
def update_trust(self, new_trust, new_cost):
self.current_trial_num = self.current_trial_num + 1
adjusted_trust = (1-self.alpha) * self.current_trust + self.alpha * new_trust
self.write_to_log(self.current_trial_num, self.current_trust, new_trust, adjusted_trust, new_cost)
self.current_trust = adjusted_trust
if self.current_trial_num == self.max_num_trials:
self.trust_log_file.close()
def get_current_trust(self):
return self.current_trust
def set_alpha(self, new_alpha):
self.alpha = new_alpha
def write_to_log(self, t_num, current_trust, single_task_trust, adjusted_trust, single_task_cost):
if self.trust_log_file.closed:
log_file_name = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trust_log.csv'
self.trust_log_file = open(log_file_name, 'ab')
self.log_writer = csv.writer(self.trust_log_file, delimiter=',', quotechar="'", quoting=csv.QUOTE_NONE)
self.log_writer.writerow([str(t_num), str(current_trust), str(single_task_trust), str(adjusted_trust), str(single_task_cost)])
self.trust_log_file.close()
else:
self.log_writer.writerow([str(t_num), str(current_trust), str(single_task_trust), str(adjusted_trust), str(single_task_cost)])
# trep system generator:
class BalanceBoard(trep.System):
def __init__(self, num_links, link_length, link_mass, damping):
super(BalanceBoard, self).__init__()
self.num_links = num_links
self.link_length = link_length
self.link_mass = link_mass
rospy.loginfo("Build balance board")
bboard = trep.Frame(self.world_frame, trep.TX, 0)
self._add_link(bboard, 1.0)
trep.potentials.Gravity(self, (0, 0.0, -9.8))
trep.forces.Damping(self, 1.0)
trep.forces.ConfigForce(self, 'link-1-base_x', 'torque1_x')
trep.forces.ConfigForce(self, 'link-1-base_y', 'torque1_y')
rospy.loginfo("Configuration Variables: %d (Kinematic: %d, Dynamic: %d), Inputs: %d, Constraints: %d", self.nQ, self.nQk, self.nQd, self.nu, self.nc)
def _add_link(self, parent, link):
if link == self.num_links+1:
return
base1 = trep.Frame(parent, trep.RX, 'link-%d-base_x' % link,
'link-%d-base_x' % link)
base2 = trep.Frame(base1, trep.RY, 'link-%d-base_y' % link,
'link-%d-base_y' % link)
end = trep.Frame(base2, trep.TZ, self.link_length,
mass=self.link_mass, name=('link-%d' % link))
self._add_link(end, link+1)
def create_systems(max_links, link_length, link_mass, frequency, amplitude, damping):
"""
Creates the balance board and loads or generates the trajectories.
"""
rospy.loginfo("Creating %d link balance board" % 2)
return BalanceBoard(2, float(link_length), float(link_mass), float(damping))
class PendulumSimulator:
def __init__(self, syst, trust, trial_num):
rospy.loginfo("Creating pendulum simulator")
self.links_bool = rospy.get_param('links', False)
self.create_timer_marker()
# first, let's just define the initial configuration of the system:
self.sys = syst
self.mvi = trep.MidpointVI(self.sys)
# self.start_interactive()
self.start_controlled_interactive()
self.trial_num = trial_num
self.finished = False
# fill out a message, and store it in the class so that I can update it
# whenever necessary
self.mappings = {
'link1_rx_link': 'link-1-base_x',
'link1_ry_link': 'link-1-base_y',
'link2_rx_link': 'link-2-base_x',
'link2_ry_link': 'link-2-base_y',
}
self.js = JS()
self.names = [x for x in self.mappings.keys()]
self.js.name = self.names
self.js.header.frame_id = 'base'
self.update_values()
self.count_exceptions = 0
self.user_ux = 0.0
self.user_uy = 0.0
self.reset_button_prev = 0
self.reset_button = 0
self.trust = trust
self.max_torque = MAX_TORQUE # rospy.get_param('~max_torque', MAX_TORQUE)
self.state = STATE_CONTROLLED_INTERACTIVE # rospy.get_param('~state', STATE_CONTROLLED_INTERACTIVE)
self.create_task_marker()
self.create_target_marker()
self.create_score_msg()
# define tf broadcaster and listener
self.br = tf.TransformBroadcaster()
self.listener = tf.TransformListener()
# define a publisher for the joint states
self.joint_pub = rospy.Publisher("joint_states", JS, queue_size=2)
# define a timer for publishing the frames and tf's
self.tf_timer = rospy.Timer(rospy.Duration(1.0/tf_freq), self.send_joint_states)
# define a timer for integrating the state of the VI
# rospy.Subscriber("/board_joy", Joy, self.joy_cb)
rospy.Subscriber("/joy", Joy, self.joy_cb)
self.timer_pub = rospy.Publisher("/task_timer", Marker, queue_size=1)
self.task_pub = rospy.Publisher("/task_direction", Marker, queue_size=2)
self.target_pub = rospy.Publisher("/task_target", Marker, queue_size=2)
self.score_pub = rospy.Publisher("/score", Score, queue_size=2)
time.sleep(0.5)
self.timer_marker.header.stamp = rospy.Time.now()
self.timer_marker.text = "START"
self.timer_pub.publish(self.timer_marker)
self.task_pub.publish(self.task_marker)
self.target_marker.header.stamp = rospy.Time.now()
self.target_pub.publish(self.target_marker)
self.timer = rospy.Timer(rospy.Duration(DT), self.update_interactive)
rospy.loginfo("Starting integration...")
def create_task_marker(self):
self.task_marker = Marker()
self.task_marker.header.frame_id = 'base'
self.task_marker.header.stamp = rospy.Time.now()
#self.task_marker.lifetime = 4.0
self.task_marker.id = 1
self.task_marker.type = 0
self.task_marker.action = 0
self.task_marker.pose.position.x = 0.0
self.task_marker.pose.position.y = 0.0
self.task_marker.pose.position.z = 0.0
# rospy.loginfo("task heading: %f", atan2(QBAR_DIP1, QBAR_DIP2))
self.task_marker.pose.orientation = GM.Quaternion(*(tf.transformations.quaternion_from_euler(atan2(QBAR_DIP2, -1*QBAR_DIP1), 0, 0, 'rzyx')))
self.task_marker.color.r = 1.0
self.task_marker.color.g = 1.0
self.task_marker.color.b = 0.0
self.task_marker.color.a = 1.0
self.task_marker.scale.z = 0.01
self.task_marker.scale.x = 0.750
self.task_marker.scale.y = 0.15
###########################
# Timer Marker
###########################
def create_timer_marker(self):
self.timer_marker = Marker()
self.timer_marker.header.frame_id = 'base'
self.timer_marker.header.stamp = rospy.Time.now()
self.timer_marker.id = 1
self.timer_marker.type = 9
self.timer_marker.action = 0
self.timer_marker.pose.position.x = 0
self.timer_marker.pose.position.y = 2.0
self.timer_marker.pose.position.z = 0
self.timer_marker.text = "0"
self.timer_marker.color.r = 1.0
self.timer_marker.color.g = 1.0
self.timer_marker.color.b = 0.0
self.timer_marker.color.a = 1.0
self.timer_marker.scale.z = 0.5
def create_target_marker(self):
self.target_marker = Marker()
self.target_marker.header.frame_id = 'base'
self.target_marker.header.stamp = rospy.Time.now()
self.target_marker.id = 1
self.target_marker.type = 2
self.target_marker.action = 0
self.target_marker.pose.position.x = 0.0
self.target_marker.pose.position.y = 0.0
self.target_marker.pose.position.z = -2.0
self.target_marker.text = "0"
self.target_marker.color.r = 0.0
self.target_marker.color.g = 0.0
self.target_marker.color.b = 1.0
self.target_marker.color.a = 0.5
self.target_marker.scale.x = 0.35
self.target_marker.scale.y = 0.35
self.target_marker.scale.z = 0.35
def create_score_msg(self):
self.score = Score()
def joy_cb(self, data):
self.joy = data
self.user_ux = self.max_torque * -data.axes[0]
self.user_uy = self.max_torque * -data.axes[1]
self.reset_button_prev = self.reset_button
self.reset_button = data.buttons[0]
if self.reset_button_prev and not self.reset_button:
self.reset()
def start_controlled_interactive(self):
self.state = STATE_CONTROLLED_INTERACTIVE
self.simulation_failed = False
# calculate linearization, and feedback controller:
tvec = np.arange(0, 600.0*TIMESTEP/1000.0, TIMESTEP/1000.0)
self.dsys = trep.discopt.DSystem(self.mvi, tvec)
link1_rx_index = self.dsys.system.get_config('link-1-base_x').index
link1_ry_index = self.dsys.system.get_config('link-1-base_y').index
link2_rx_index = self.dsys.system.get_config('link-2-base_x').index
link2_ry_index = self.dsys.system.get_config('link-2-base_y').index
qd_up = np.zeros((len(tvec), self.mvi.system.nQ))
qd_up[:] = [0]*self.mvi.system.nQ
qd_up[:,link1_rx_index] = QBAR_END
#qd_up[:,link1_ry_index] = QBAR_END
qd_dip = np.zeros((len(tvec), self.mvi.system.nQ))
qd_dip[:] = [0]*self.mvi.system.nQ
qd_dip[:,link1_rx_index] = QBAR_END
#qd_dip[:,link1_ry_index] = QBAR_END
self.qd_comb = np.zeros((len(tvec), self.mvi.system.nQ))
self.qd_comb[:] = [0]*self.mvi.system.nQ
self.qd_comb[:,link1_rx_index] = QBAR_END
#self.qd_comb[:,link1_ry_index] = QBAR_END
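        # The desired trajectory holds the pendulum inverted (QBAR_END) for
        # the first 200 steps (~4 s at the 20 ms timestep), dips toward
        # QBAR_DIP between steps 200 and 400, and returns upright for the
        # final 200 steps.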
for i, t in enumerate(tvec):
if i > 200 and i < 400:
qd_dip[i, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP) # Set desired configuration trajectory
qd_dip[i, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
qd_dip[i, link2_rx_index] = -1*QBAR_DIP
qd_dip[i, link2_ry_index] = QBAR_DIP
self.qd_comb[i, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP) # Set desired configuration trajectory
self.qd_comb[i, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
self.qd_comb[i, link2_rx_index] = -1*QBAR_DIP
self.qd_comb[i, link2_ry_index] = QBAR_DIP
qd_state_dip = np.zeros((1, self.mvi.system.nQ))
qd_state_dip[0, link1_rx_index] = normalize_angle(QBAR_END + QBAR_DIP)
qd_state_dip[0, link1_ry_index] = -1*normalize_angle(QBAR_DIP)
qd_state_dip[0, link2_rx_index] = -1*QBAR_DIP
qd_state_dip[0, link2_ry_index] = QBAR_DIP
self.xBar_dip = self.dsys.build_state(qd_state_dip)
qd_state_end = np.zeros((1, self.mvi.system.nQ))
qd_state_end[0, link1_rx_index] = normalize_angle(QBAR_END)
qd_state_end[0, link1_ry_index] = 0.0
qd_state_end[0, link2_rx_index] = 0.0
qd_state_end[0, link2_ry_index] = 0.0
self.xBar_end = self.dsys.build_state(qd_state_end)
(Xd_up, Ud_up) = self.dsys.build_trajectory(qd_up) # Set desired state and input trajectory
(Xd_dip, Ud_dip) = self.dsys.build_trajectory(qd_dip)
(Xd_comb, Ud_comb) = self.dsys.build_trajectory(self.qd_comb)
self.Xd = Xd_comb
self.Ud = Ud_comb
# rospy.loginfo("X shape")
# rospy.loginfo(Xd_up.shape)
# rospy.loginfo("U shape")
# rospy.loginfo(Ud_up.shape)
self.state_trajectory = np.zeros(Xd_up.shape)
self.user_input_trajectory = np.zeros(Ud_up.shape)
self.controller_input_trajectory = np.zeros(Ud_up.shape)
self.combined_input_trajectory = np.zeros(Ud_up.shape)
# rospy.loginfo("Xd: %d", len(Xd_up))
dyn_weight = 50
kin_weight = 25
mom_weight = 1
vel_weight = kin_weight*TIMESTEP/1000.0
self.Q = np.diag(np.hstack(([dyn_weight]*self.sys.nQd,
[kin_weight]*self.sys.nQk,
[mom_weight]*self.sys.nQd,
[vel_weight]*self.sys.nQk)))
self.R = 0.000001*np.identity(2)
# rospy.loginfo("Q shape: %s, R shape %s", self.Q.shape, self.R.shape)
self.Qk = lambda k: self.Q
self.Rk = lambda k: self.R
(Kstab_up, A_up, B_up) = self.dsys.calc_feedback_controller(Xd_up, Ud_up, self.Qk, self.Rk, return_linearization=True)
(Kstab_dip, A_dip, B_dip) = self.dsys.calc_feedback_controller(Xd_dip, Ud_dip, self.Qk, self.Rk, return_linearization=True)
self.Kstab_up = Kstab_up
self.Kstab_dip = Kstab_dip
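        # calc_feedback_controller appears to return time-varying stabilizing
        # gains about the "up" and "dip" reference trajectories; only the
        # first gain matrix (Kstab_*[0]) is applied each step in
        # update_interactive below.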
self.k = 1
self.state_trajectory[0] = self.xBar_end
# set initial system state
q0 = (QBAR_END, 0.0, 0.0, 0.0)
q1 = (QBAR_END, 0.0, 0.0, 0.0)
self.mvi.initialize_from_configs(0.0, q0, DT, q1)
self.disp_q = self.mvi.q2
self.disp_qd = np.hstack((self.mvi.q2[0:self.sys.nQd], [0]*self.sys.nQk))
def calc_correlations(self):
self.state_trajectory
self.combined_input_trajectory
self.user_input_trajectory
self.controller_input_trajectory
self.cmd_x_corr = np.correlate(self.user_input_trajectory[:,0], self.controller_input_trajectory[:,0], 'full')
self.cmd_y_corr = np.correlate(self.user_input_trajectory[:,1], self.controller_input_trajectory[:,1], 'full')
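        # np.correlate(..., 'full') returns 2*N - 1 lags for length-N inputs;
        # the peak location relative to the centre of that array gives the
        # time offset at which the user and controller commands best align.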
max_x_idx = np.argmax(self.cmd_x_corr)
max_y_idx = np.argmax(self.cmd_y_corr)
max_idx = 0
max_corr = 0
for i in range(len(self.cmd_x_corr)):
if self.cmd_x_corr[i] + self.cmd_y_corr[i] > max_corr:
max_idx = i
max_corr = self.cmd_x_corr[i] + self.cmd_y_corr[i]
self.score.cmd_corr = max_corr
self.score.cmd_offset = max_idx - (len(self.user_input_trajectory[:,0]))
self.score.cmd_x_corr = self.cmd_x_corr[max_x_idx]
self.score.cmd_x_corr_offset = max_x_idx - (len(self.user_input_trajectory[:,0]))
self.score.cmd_y_corr = self.cmd_y_corr[max_y_idx]
self.score.cmd_y_corr_offset = max_y_idx - (len(self.user_input_trajectory[:,0]))
rospy.loginfo("x cmd correlation: %f, index offset: %d", self.cmd_x_corr[max_x_idx], max_x_idx - (len(self.user_input_trajectory[:,0])))
rospy.loginfo("y cmd correlation: %f, index offset: %d", self.cmd_y_corr[max_y_idx], max_y_idx - (len(self.user_input_trajectory[:,0])))
rospy.loginfo("cmd correlation: %f, index offset: %d", max_corr, max_idx - (len(self.user_input_trajectory[:,0])))
self.state_x1_corr = np.correlate(self.state_trajectory[:,0], self.Xd[:,0], 'full')
self.state_y1_corr = np.correlate(self.state_trajectory[:,1], self.Xd[:,1], 'full')
self.state_x2_corr = np.correlate(self.state_trajectory[:,2], self.Xd[:,2], 'full')
self.state_y2_corr = np.correlate(self.state_trajectory[:,3], self.Xd[:,3], 'full')
max_idx = 0
max_corr = 0
for i in range(len(self.state_x1_corr)):
            if (self.state_x1_corr[i] + self.state_y1_corr[i] +
                    self.state_x2_corr[i] + self.state_y2_corr[i]) > max_corr:
                max_idx = i
                max_corr = (self.state_x1_corr[i] + self.state_y1_corr[i] +
                            self.state_x2_corr[i] + self.state_y2_corr[i])
self.score.state_corr = max_corr
self.score.state_offset = max_idx - (len(self.state_trajectory[:,0]))
rospy.loginfo("state correlation: %f, index offset: %d", self.score.state_corr, self.score.state_offset)
def update_interactive(self, event):
if self.simulation_failed or self.k == 600:
self.timer.shutdown()
self.tf_timer.shutdown()
self.tcost = 0
if self.k == 600:
self.timer_marker.header.stamp = rospy.Time.now()
self.timer_marker.text = "STOP"
self.timer_pub.publish(self.timer_marker)
# Pad the trajectories so the costs are high, but not so high in the case of stabilization failure
if self.k < 599:
last_state = self.state_trajectory[self.k-1]
last_combined_command = self.combined_input_trajectory[self.k-1]
last_user_command = self.user_input_trajectory[self.k-1]
last_controller_command = self.controller_input_trajectory[self.k-1]
for i in range(self.k, 600):
self.state_trajectory[i] = last_state
self.combined_input_trajectory[i-1] = last_combined_command
self.user_input_trajectory[i-1] = last_user_command
self.controller_input_trajectory[i-1] = last_controller_command
filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_combined_trajectory.mat'
self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.combined_input_trajectory)
filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_user_trajectory.mat'
self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.user_input_trajectory)
filestr = '/home/mderry/cps_data/user_' + str(USER_NUMBER) + '/user_' + str(USER_NUMBER) + '_trial_' + str(self.trial_num) + '_controller_trajectory.mat'
self.dsys.save_state_trajectory(filestr, self.state_trajectory, self.controller_input_trajectory)
dcost = discopt.DCost(self.Xd, self.Ud, self.Q, self.R)
optimizer = discopt.DOptimizer(self.dsys, dcost)
self.tcost = optimizer.calc_cost(self.state_trajectory, self.combined_input_trajectory)
rospy.loginfo("calc_cost: %f", self.tcost)
#if tcost > MAX_COST:
# tcost = MAX_COST
if self.tcost < MIN_COST:
self.tcost = MIN_COST
self.score.cost = self.tcost
# self.task_trust = 1.0 - ((optimizer.calc_cost(self.state_trajectory, self.combined_input_trajectory) - MIN_COST)/(MAX_COST-MIN_COST))
self.task_trust = exp(EXPONENT * (self.tcost - MIN_COST))
if self.task_trust > 1.0:
self.task_trust = 1.0
elif self.task_trust < 0.0:
self.task_trust = 0.0
self.calc_correlations()
self.score_pub.publish(self.score)
self.finished = True
return
u = self.mvi.u1
u[0] = self.user_ux
u[1] = self.user_uy
if self.k % 50 == 0:
rospy.loginfo("Clock: %d", self.k/50)
self.timer_marker.header.stamp = rospy.Time.now()
self.target_marker.header.stamp = rospy.Time.now()
if self.k == 200:
self.timer_marker.text = "DIP"
self.target_marker.pose.position.x = -1*sin(QBAR_DIP1) + 0.05
self.target_marker.pose.position.y = sin(QBAR_DIP2) - 0.05
self.target_marker.pose.position.z = -1.95
elif self.k == 400:
self.timer_marker.text = "RETURN"
self.target_marker.pose.position.x = 0.0
self.target_marker.pose.position.y = 0.0
self.target_marker.pose.position.z = -2.0
elif self.k == 600:
self.timer_marker.text = "STOP"
elif self.k > 400:
self.timer_marker.text = str(12-(self.k/50))
elif self.k > 200:
self.timer_marker.text = str(8-(self.k/50))
elif self.k < 200:
self.timer_marker.text = str(4-(self.k/50))
self.timer_pub.publish(self.timer_marker)
self.target_pub.publish(self.target_marker)
## if we are in interactive+control mode, let's run that controller:
if self.state == STATE_CONTROLLED_INTERACTIVE:
# get state stuff
qtmp = self.mvi.q2
ptmp = self.mvi.p2
# wrap angle of pendulum:
for q in self.sys.dyn_configs:
q0 = qtmp[self.sys.get_config(q).index]
qtmp[self.sys.get_config(q).index] = normalize_angle(q0)
X = np.hstack((qtmp, ptmp))
#print "State:"
#print X
if not self.in_basin_of_attraction(X):
self.simulation_failed = True
rospy.loginfo("Outside basin of attraction")
self.state_trajectory[self.k] = X
# calculate feedback law
if self.k > 200 and self.k < 400:
xTilde = X - self.xBar_dip
xTilde[0] = normalize_angle(xTilde[0])
xTilde[1] = normalize_angle(xTilde[1])
xTilde[2] = normalize_angle(xTilde[2])
xTilde[3] = normalize_angle(xTilde[3])
u_cont = -np.dot(self.Kstab_dip[0], xTilde)
#u_cont = u_cont + (0.75 * 9.81 * 2.0 * sin(QBAR_DIP))
if self.k == 201:
rospy.loginfo("DIP!")
# rospy.loginfo(xTilde)
else:
xTilde = X - self.xBar_end
xTilde[0] = normalize_angle(xTilde[0])
xTilde[1] = normalize_angle(xTilde[1])
xTilde[2] = normalize_angle(xTilde[2])
xTilde[3] = normalize_angle(xTilde[3])
u_cont = -np.dot(self.Kstab_up[0], xTilde)
if self.k == 400:
rospy.loginfo("POP!")
# rospy.loginfo(xTilde)
#print "Error:"
#print xTilde
if u_cont[0] > SATURATION_TORQUE:
u_cont[0] = SATURATION_TORQUE
if u_cont[1] > SATURATION_TORQUE:
u_cont[1] = SATURATION_TORQUE
#self.user_ux = 0.0
#self.user_uy = 0.0
# blend user and feedback control input
u[0] = self.trust*self.user_ux + (1-self.trust)*u_cont[0]
u[1] = self.trust*self.user_uy + (1-self.trust)*u_cont[1]
self.controller_input_trajectory[self.k-1] = u_cont
self.user_input_trajectory[self.k-1, 0] = self.user_ux
self.user_input_trajectory[self.k-1, 1] = self.user_uy
self.combined_input_trajectory[self.k-1] = u
# rospy.loginfo("[before] u: (%f, %f), user: (%f, %f), controller: (%f, %f)", u[0], u[1], self.user_ux, self.user_uy, u_cont[0], u_cont[1])
try:
self.k += 1
t2 = self.k*TIMESTEP/1000.0
self.mvi.step(t2, u1=u)
except trep.ConvergenceError:
self.simulation_failed = True
rospy.logwarn("No solution to DEL equations!")
return
self.disp_q = self.mvi.q2
self.disp_qd = np.hstack((self.mvi.q2[0:self.sys.nQd], [0]*self.sys.nQk))
self.update_values()
def update_values(self):
"""
Just fill in all of position array stuff for the JS message
"""
pos = [self.sys.get_config(self.mappings[n]).q for n in self.names]
self.js.position = pos
def send_joint_states(self, event):
tnow = rospy.Time.now()
# first send the transform:
quat = tuple(tf.transformations.quaternion_from_euler(0, 0, 0, 'rzyx'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat,
tnow,
'base', 'world')
self.js.header.stamp = tnow
self.joint_pub.publish(self.js)
# send transforms
quat = tuple(tf.transformations.quaternion_from_euler(self.sys.get_config('link-1-base_x').q, 0, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link1_rx_link', 'base')
quat = tuple(tf.transformations.quaternion_from_euler(0, self.sys.get_config('link-1-base_y').q, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link1_ry_link', 'link1_rx_link')
quat = tuple(tf.transformations.quaternion_from_euler(self.sys.get_config('link-2-base_x').q, 0, 0, 'sxyz'))
point = tuple((0, 0, 0.5))
self.br.sendTransform(point, quat, tnow, 'link2_rx_link', 'link1')
quat = tuple(tf.transformations.quaternion_from_euler(0, self.sys.get_config('link-2-base_y').q, 0, 'sxyz'))
point = tuple((0, 0, 0))
self.br.sendTransform(point, quat, tnow, 'link2_ry_link', 'link2_rx_link')
def in_basin_of_attraction(self, state):
BASIN_THRESHOLD = 2*pi/3
if abs(state[0]) + abs(state[1]) > BASIN_THRESHOLD:
# return False
return True
else:
return True
def output_log(self):
self.file = 2
def main():
"""
    Run the main loop by instantiating a PendulumSimulator for each trial
    and waiting for it to finish.
"""
rospy.init_node('pendulum_simulator') # , log_level=rospy.INFO)
# check what the value of the links param is
links_bool = rospy.get_param('links', False)
if not rospy.has_param('links'):
rospy.set_param('links', links_bool)
count = 1
try:
system = create_systems(2, link_length='1.0', link_mass='2.0', frequency='0.5', amplitude='0.5', damping='0.1')
user = User(USER_NUMBER, MAX_TRIALS, STARTING_TRUST, ALPHA, INPUT_DEVICE)
r = rospy.Rate(100)
while user.current_trial_num < user.max_num_trials:
rospy.loginfo("Trial number: %d", count)
sim = PendulumSimulator(system, user.current_trust, user.current_trial_num)
while not sim.finished:
r.sleep()
rospy.loginfo("current trust: %f", user.current_trust)
user.update_trust(sim.task_trust, sim.tcost)
rospy.loginfo("new current trust: %f, task trust: %f", user.current_trust, sim.task_trust)
del sim
count = count + 1
time.sleep(2)
except rospy.ROSInterruptException:
pass
rospy.loginfo('Session Complete')
if __name__ == '__main__':
main()
| gpl-2.0 |
thalespaiva/autofinger | comfort.py | 1 | 3386 | #!/usr/bin/python
# First try with a simple genetic algorithm
# Based on my hand: (in centimeters)
DISTANCES_NATURAL = {
(1, 2): 15, (1, 3): 19, (1, 4): 20.5,
(1, 5): 21.5, (2, 3): 9, (2, 4): 13,
(2, 5): 16.5, (3, 4): 8.5, (3, 5): 13,
(4, 5): 8,
}
DISTANCES_CROSSING = {
(1, 2): 12, (1, 3): 9, (1, 4): 6,
(1, 5): 0.01, (2, 3): 0.01, (2, 4): 0.005,
(2, 5): 0.0001, (3, 4): 0.01, (3, 5): 0.0001,
(4, 5): 0.0001,
}
CLOSED_DISTANCE = {
(1, 2): 4.5, (1, 3): 7, (1, 4): 8.6,
(1, 5): 10.5, (2, 3): 3.3, (2, 4): 6,
(2, 5): 8.7, (3, 4): 3.3, (3, 5): 6.1,
(4, 5): 3.3,
}
AV_KEY_WIDTH = 1.95 # centimeters
COMFORT_MEMO_TABLE = {}
def calculate_jump_comfort(finger_org, finger_dst, jump_in_half_tones):
global COMFORT_MEMO_TABLE
try:
return COMFORT_MEMO_TABLE[(finger_org, finger_dst, jump_in_half_tones)]
except KeyError:
key = (finger_org, finger_dst, jump_in_half_tones)
COMFORT_MEMO_TABLE[key] = calculate_jump_comfort_real(
finger_org, finger_dst, jump_in_half_tones)
return COMFORT_MEMO_TABLE[key]
def calculate_jump_comfort_real(finger_org, finger_dst, jump_in_half_tones):
jump = jump_in_half_tones * AV_KEY_WIDTH
if (finger_org is None) or (finger_dst is None):
return 0
key = tuple(sorted((finger_org, finger_dst)))
if finger_org == finger_dst:
return abs(jump)
elif jump >= 0:
if finger_org < finger_dst:
dist = DISTANCES_NATURAL[key]
else:
dist = DISTANCES_CROSSING[key]
diff = jump - dist
factor = jump / dist
if diff > 0:
return abs(diff)
else:
return abs(factor * CLOSED_DISTANCE[key])
else:
if finger_org > finger_dst:
dist = DISTANCES_NATURAL[key]
else:
dist = DISTANCES_CROSSING[key]
diff = jump + dist
factor = -jump / dist
if diff < 0:
return abs(diff)
else:
return abs(factor * CLOSED_DISTANCE[key])
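# Worked example (illustrative only, values follow from the tables above):
# calculate_jump_comfort(1, 2, 4) converts 4 half steps to 4 * 1.95 = 7.8 cm;
# this is narrower than the 15 cm natural span from finger 1 to finger 2, so
# the cost is the closed-hand term (7.8 / 15) * 4.5 = 2.34.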
def calculate_comforts(fingers_org, fingers_dst, jumps):
from itertools import product
keys = product(fingers_org, fingers_dst, jumps)
xs = []
ys = []
zs = []
cs = []
for key in keys:
o, d, j = key
xs.append(o)
ys.append(d)
zs.append(j)
cs.append(calculate_jump_comfort(o, d, j))
return (xs, ys, zs, cs)
def plot_comfort(fingers_org=range(1, 6, 1), fingers_dst=range(1, 6, 1),
jumps=range(-12, 13, 1)):
import seaborn
from mpl_toolkits.mplot3d import Axes3D
from pylab import plt
xs, ys, zs, cs = calculate_comforts(
fingers_org, fingers_dst, jumps)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, ys, zs, c=cs)
ax.set_zlabel("Interval (half steps)", fontsize=15)
ax.set_zlim(jumps[0], jumps[-1])
# ax.set_zticks(jumps)
plt.xticks(fingers_org)
plt.xlim(fingers_org[0], fingers_org[-1])
plt.xlabel("From finger", fontsize=15)
plt.yticks(fingers_dst)
plt.ylim(fingers_dst[0], fingers_dst[-1])
plt.ylabel("To finger", fontsize=15)
plt.title("Difficulty of finger passages", fontsize=25)
plt.savefig('./figures/image.png', figsize=(16, 12), dpi=300)
plt.show()
if __name__ == '__main__':
plot_comfort()
| gpl-2.0 |
mne-tools/mne-tools.github.io | dev/_downloads/a4921072acc135828760714b86be20cc/eeglab_head_sphere.py | 10 | 4762 | """
.. _ex-topomap-eeglab-style:
========================================
How to plot topomaps the way EEGLAB does
========================================
If you have previous EEGLAB experience you may have noticed that topomaps
(topoplots) generated using MNE-Python look a little different from those
created in EEGLAB. If you prefer the EEGLAB style this example will show you
how to calculate head sphere origin and radius to obtain EEGLAB-like channel
layout in MNE.
"""
# Authors: Mikołaj Magnuski <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
import mne
print(__doc__)
###############################################################################
# Create fake data
# ----------------
#
# First we will create a simple evoked object with a single timepoint using
# biosemi 10-20 channel layout.
biosemi_montage = mne.channels.make_standard_montage('biosemi64')
n_channels = len(biosemi_montage.ch_names)
fake_info = mne.create_info(ch_names=biosemi_montage.ch_names, sfreq=250.,
ch_types='eeg')
rng = np.random.RandomState(0)
data = rng.normal(size=(n_channels, 1)) * 1e-6
fake_evoked = mne.EvokedArray(data, fake_info)
fake_evoked.set_montage(biosemi_montage)
###############################################################################
# Calculate sphere origin and radius
# ----------------------------------
#
# EEGLAB plots head outline at the level where the head circumference is
# measured in the 10-20 system (a line going through Fpz, T8/T4, Oz and T7/T3
# channels).
# MNE-Python places the head outline lower on the z dimension, at the level of
# the anatomical landmarks :term:`LPA, RPA, and NAS <fiducial>`.
# Therefore to use the EEGLAB layout we
# have to move the origin of the reference sphere (a sphere that is used as a
# reference when projecting channel locations to a 2d plane) a few centimeters
# up.
#
# Instead of approximating this position by eye, as we did in :ref:`the sensor
# locations tutorial <tut-sensor-locations>`, here we will calculate it using
# the position of Fpz, T8, Oz and T7 channels available in our montage.
# first we obtain the 3d positions of selected channels
chs = ['Oz', 'Fpz', 'T7', 'T8']
pos = np.stack([biosemi_montage.get_positions()['ch_pos'][ch] for ch in chs])
# now we calculate the radius from T7 and T8 x position
# (we could use Oz and Fpz y positions as well)
radius = np.abs(pos[[2, 3], 0]).mean()
# then we obtain the x, y, z sphere center this way:
# x: x position of the Oz channel (should be very close to 0)
# y: y position of the T8 channel (should be very close to 0 too)
# z: average z position of Oz, Fpz, T7 and T8 (their z position should be
# the same, so we could also use just one of these channels), it should be
# positive and somewhere around `0.03` (3 cm)
x = pos[0, 0]
y = pos[-1, 1]
z = pos[:, -1].mean()
# lets print the values we got:
print([f'{v:0.5f}' for v in [x, y, z, radius]])
###############################################################################
# Compare MNE and EEGLAB channel layout
# -------------------------------------
#
# We already have the required x, y, z sphere center and its radius — we can
# use these values passing them to the ``sphere`` argument of many
# topo-plotting functions (by passing ``sphere=(x, y, z, radius)``).
# create a two-panel figure with some space for the titles at the top
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
sharex=True, sharey=True)
# we plot the channel positions with default sphere - the mne way
fake_evoked.plot_sensors(axes=ax[0], show=False)
# in the second panel we plot the positions using the EEGLAB reference sphere
fake_evoked.plot_sensors(sphere=(x, y, z, radius), axes=ax[1], show=False)
# add titles
ax[0].set_title('MNE channel projection', fontweight='bold')
ax[1].set_title('EEGLAB channel projection', fontweight='bold')
###############################################################################
# Topomaps (topoplots)
# --------------------
#
# As the last step we do the same, but plotting the topomaps. These will not
# be particularly interesting as they will show random data but hopefully you
# will see the difference.
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
sharex=True, sharey=True)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[0],
show=False)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[1],
show=False, sphere=(x, y, z, radius))
# add titles
ax[0].set_title('MNE', fontweight='bold')
ax[1].set_title('EEGLAB', fontweight='bold')
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/sparse/frame/test_analytics.py | 2 | 1118 | import numpy as np
import pytest
from pandas import DataFrame, SparseDataFrame, SparseSeries
from pandas.util import testing as tm
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile_multi():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
| bsd-3-clause |
JosmanPS/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
start-jsk/jsk_apc | demos/grasp_data_generator/grasp_data_generator/visualizations/vis_occluded_grasp_instance_segmentation.py | 1 | 7143 | from __future__ import division
import numpy as np
from chainercv.visualizations.colormap import voc_colormap
from chainercv.visualizations import vis_image
from grasp_data_generator.models.occluded_grasp_mask_rcnn.utils \
import rot_lbl_to_rot
def vis_occluded_grasp_instance_segmentation(
img, ins_label, label=None, bbox=None, score=None,
sg_label=None, dg_label=None, label_names=None, rotate_angle=None,
instance_colors=None, alpha=0.7, linewidth=1., fontsize=8, prefix=None,
axes=None,
):
from matplotlib import pyplot as plt
    if bbox is not None and len(bbox) != len(ins_label):
        raise ValueError('The length of ins_label must be the same as that of bbox')
    if label is not None and len(bbox) != len(label):
        raise ValueError('The length of label must be the same as that of bbox')
    if score is not None and len(bbox) != len(score):
        raise ValueError('The length of score must be the same as that of bbox')
n_inst = len(bbox)
if instance_colors is None:
instance_colors = voc_colormap(list(range(1, n_inst + 1)))
instance_colors = np.array(instance_colors)
if axes is None:
f, axes = plt.subplots(1, 5, sharey=True)
else:
f = None
ins_names = ['background', 'visible', 'occluded']
for ins_id, ax in enumerate(axes[:3]):
if prefix is None:
ax.set_title(ins_names[ins_id])
else:
ax.set_title('{0} : {1}'.format(prefix, ins_names[ins_id]))
ax = vis_image(img, ax=ax)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, ins_lbl) in enumerate(zip(bbox, ins_label)):
# The length of `colors` can be smaller than the number of
# instances if a non-default `colors` is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
ins_mask = ins_lbl[y_min:y_max, x_min:x_max] == ins_id
canvas_img[y_min:y_max, x_min:x_max][ins_mask] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if len(caption) > 0:
ax.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax.imshow(canvas_img)
ax3, ax4 = axes[3:5]
if prefix is None:
ax3.set_title('single grasp')
else:
ax3.set_title('{0} : single grasp'.format(prefix))
ax3 = vis_image(img, ax=ax3)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, sg_lbl) in enumerate(zip(bbox, sg_label)):
count = np.bincount(sg_lbl.flatten(), minlength=1)
# no grasp mask
if len(count) == 1:
continue
rot_id = np.argmax(count[1:]) + 1
# The length of `colors` can be smaller than the number of
# instances if a non-default `colors` is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
canvas_img[sg_lbl == rot_id] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax3.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if rotate_angle is not None:
rot = rot_lbl_to_rot(rot_id, rotate_angle)
caption.append('{} degree'.format(rot))
if len(caption) > 0:
ax3.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax3.imshow(canvas_img)
if prefix is None:
ax4.set_title('dual grasp')
else:
ax4.set_title('{0} : dual grasp'.format(prefix))
ax4 = vis_image(img, ax=ax4)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, dg_lbl) in enumerate(zip(bbox, dg_label)):
count = np.bincount(dg_lbl.flatten(), minlength=1)
# no grasp mask
if len(count) == 1:
continue
rot_id = np.argmax(count[1:]) + 1
# The length of `colors` can be smaller than the number of
# instances if a non-default `colors` is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
canvas_img[dg_lbl == rot_id] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax4.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if rotate_angle is not None and dg_lbl.max() > 0:
rot = rot_lbl_to_rot(rot_id, rotate_angle)
caption.append('{} degree'.format(rot))
if len(caption) > 0:
ax4.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax4.imshow(canvas_img)
return f, axes
| bsd-3-clause |
LiaoPan/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation of the prediction error on non-corrupt new data
is used to judge the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
 a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
mizzao/ggplot | ggplot/tests/test_theme_mpl.py | 12 | 3907 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import six
from nose.tools import assert_true
from ggplot.tests import image_comparison, cleanup
from ggplot import *
def _diff(a, b):
ret = {}
for key, val in a.items():
if key in b:
if b[key] != val:
ret[key] = "%s: %s -> %s" % (key, val, b[key])
else:
ret[key] = "%s: %s -> %s" % (key, val, "--")
for key, val in b.items():
if key not in a:
ret[key] = "%s: %s -> %s" % (key, "--", val)
return ret
@cleanup
def test_theme_matplotlib():
gg = ggplot(aes(x='date', y='beef'), data=meat)
a = mpl.rcParams.copy()
_theme = theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False)
assert_true(len(_theme._rcParams) < 2, "setting font.family changed more than that in the theme. %s" % list(six.iterkeys(_theme._rcParams))[:5])
gg = gg + _theme
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "setting font.family changed more than that in ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
@image_comparison(baseline_images=['theme_clean', 'theme_mpl_completly'])
def test_theme_matplotlib2():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
a = mpl.rcParams.copy()
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 1, "Just plotting changed something in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
print(gg + theme_matplotlib())
@image_comparison(baseline_images=['theme_clean2', 'theme_mpl_only_one'])
def test_theme_matplotlib3():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
a = mpl.rcParams.copy()
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 1, "Just plotting changed something in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
_theme = theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False)
gg = gg + _theme
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "Setting just one param changed more in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
print(gg)
b = mpl.rcParams.copy()
assert_true(len(_diff(a,b)) < 2, "Plotting after setting just one param changed more in the ggplot object: %s" % list(six.iterkeys(_diff(a,b)))[:5])
@image_comparison(baseline_images=['theme_mpl_all_before', 'theme_mpl_all_after'])
def test_theme_matplotlib4():
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
print(gg + theme_matplotlib())
print(gg + theme_matplotlib({"font.family": "serif"}, matplotlib_defaults=False))
@image_comparison(baseline_images=['theme_mpl_all_before'])
def test_theme_matplotlib5():
# Make sure the last complete theme wins.
gg = ggplot(aes(x='date', y='beef'), data=meat) + \
geom_point(color='lightblue') + \
stat_smooth(span=.15, color='black', se=True) + \
ggtitle("Beef: It's What's for Dinner") + \
xlab("Date") + \
ylab("Head of Cattle Slaughtered")
print(gg + theme_gray() + theme_matplotlib())
def test_theme_matplotlib6():
tmpl = theme_matplotlib()
assert_true(tmpl.complete)
| bsd-2-clause |
huhuibin147/osu-qqbot | new/tree.py | 1 | 4724 | from math import log
import operator
def createDataSet():
dataSet = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
labels = ['no surfacing','flippers']
return dataSet, labels
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
    # count how often each class label appears
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0
    # accumulate the Shannon entropy: H = -sum_i p_i * log2(p_i)
    for key in labelCounts:
        # probability of this class
prob = labelCounts[key]/numEntries
shannonEnt -= prob * log(prob, 2)
return shannonEnt
def splitDataSet(dataSet, axis, value):
    # axis: index of the feature to split on, value: feature value to match
retDataSet = []
for featVec in dataSet:
        # drop this feature and keep only the rows whose value matches
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
    # number of features (the last column holds the class label)
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGain = 0.0
bestFeature = -1
    # iterate over every feature
for i in range(numFeatures):
        # all values taken by this feature across the data set
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
        # compute the weighted entropy of splitting on this feature
for value in uniqueVals:
#分离特征集合
subDataSet = splitDataSet(dataSet, i, value)
#特征分类概率
prob = len(subDataSet)/len(dataSet)
newEntropy += prob * calcShannonEnt(subDataSet)
        # information gain is the reduction in entropy
infoGain = baseEntropy - newEntropy
if(infoGain > bestInfoGain):
bestInfoGain = infoGain
bestFeature = i
return bestFeature
def majorityCnt(classList):
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(),key=lambda k:k[1],reverse=True)
return sortedClassCount[0][0]
def createTree(dataSet,labels):
    # list of class labels
    classList = [example[-1] for example in dataSet]
    # stop case 1: every sample has the same class
if classList.count(classList[0]) == len(classList):
return classList[0]
    # stop case 2: no features left but classes still differ -> majority vote
if len(dataSet[0]) == 1:
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat])
    # values taken by the best feature
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
    # grow one branch per feature value
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),subLabels)
return myTree
def classify(inputTree,featLabels,testVec):
#{'no surfacing':{0:'no',1:{'flippers':{0:'no',1:'yes'}}}}
    # feature tested at the root of this (sub)tree
    firstStr = list(inputTree.keys())[0]
    # sub-dictionary of branches for that feature
    secondDict = inputTree[firstStr]
    # index of that feature in the test vector
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
            if isinstance(secondDict[key], dict):
classLabels = classify(secondDict[key],featLabels,testVec)
else:
classLabels = secondDict[key]
return classLabels
def storeTree(inputTree, filename):
import pickle
    fw = open(filename, 'wb')  # pickle needs a binary file handle
pickle.dump(inputTree,fw)
fw.close()
def grabTree(filename):
import pickle
    fr = open(filename, 'rb')  # pickle needs a binary file handle
return pickle.load(fr)
#########################
# import matplotlib.pyplot as plt
# decisionNode = dict(boxstyle='sawtooth', fc='0.8')
# leafNode = dict(boxstyle='round4', fc='0.8')
# arrow_args = dict(arrowstyle='<-')
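# NOTE: the plotting helpers below rely on the commented-out matplotlib imports above.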
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction', xytext=centerPt,
textcoords='axes fraction',va='center',ha='center',bbox=nodeType,arrowprops=arrow_args)
def createPlot():
fig = plt.figure(1, facecolor='white')
fig.clf()
createPlot.ax1 = plt.subplot(111, frameon=False)
plotNode('a decision node', (0.5,0.1), (0.1,0.5), decisionNode)
plotNode('a leaf node', (0.8,0.1), (0.3,0.8),leafNode)
plt.show()
#########################
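# Minimal usage sketch (not part of the original module): build a tree from the
# toy data set above and classify one sample. The label list is copied because
# createTree() deletes entries from it.
if __name__ == '__main__':
    myDat, myLabels = createDataSet()
    myTree = createTree(myDat, myLabels[:])
    print(myTree)  # {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
    print(classify(myTree, myLabels, [1, 0]))  # -> 'no'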
| gpl-2.0 |
marmarko/ml101 | tensorflow/examples/skflow/text_classification_character_rnn.py | 6 | 3028 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using recurrent neural networks over characters
for the DBpedia dataset to predict the class from the description of an entity.
This model is similar to the one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(x, y):
"""Character level recurrent neural network model to predict classes."""
y = tf.one_hot(y, 15, 1, 0)
byte_list = learn.ops.one_hot_matrix(x, 256)
byte_list = tf.unpack(byte_list, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.rnn(cell, byte_list, dtype=tf.float32)
prediction, loss = learn.models.logistic_regression(encoding, y)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
sbg2133/miscellaneous_projects | lic/lic.py | 1 | 1085 | from subprocess import call
import sys
import numpy as np
import matplotlib.pyplot as plt
from magnetic_dipole import dipole
plt.ion()
plt.figure(figsize = (10.24, 7.68), dpi = 100)
xsize, ysize = int(sys.argv[1]), int(sys.argv[2])
xmax, ymax = 200, 200
X = np.linspace(0, xmax, xsize)
Y = np.linspace(0, ymax, ysize)
x, y = np.meshgrid(X,Y)
### magnetic dipole ###
dx, dy = dipole(m=[5., 5.], r=np.meshgrid(X,Y), r0=[xmax/2. + 0.1, ymax/2. + 0.3]).astype('float32')
vectors = np.array([dx,dy])
white = np.random.rand(xsize, ysize)
with open('texture.dat', 'w') as outfile:
for row in white:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with open('dx.dat', 'w') as outfile:
for row in dx:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with open('dy.dat', 'w') as outfile:
for row in dy:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
command = ["./lic", str(xsize), str(ysize)]
call(command)
lic = np.loadtxt("./lic.dat")
plt.imshow(lic, cmap = "viridis", interpolation = "sinc")
plt.tight_layout()
| gpl-3.0 |
gpfreitas/bokeh | sphinx/source/conf.py | 1 | 11340 | # -*- coding: utf-8 -*-
#
# Bokeh documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 12 23:43:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'bokeh.sphinxext.bokeh_autodoc',
'bokeh.sphinxext.bokeh_gallery',
'bokeh.sphinxext.bokeh_github',
'bokeh.sphinxext.bokeh_jinja',
'bokeh.sphinxext.bokeh_model',
'bokeh.sphinxext.bokeh_palette',
'bokeh.sphinxext.bokeh_plot',
'bokeh.sphinxext.bokeh_prop',
'bokeh.sphinxext.collapsible_code_block',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bokeh'
copyright = u'2013, Continuum Analytics'
# Get the standard computed Bokeh version string to use for |version|
# and |release|
from bokeh import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# Check for version override (e.g. when re-deploying a previously released
# docs, or when pushing test docs that do not have a corresponding BokehJS
# available on CDN)
from bokeh.settings import settings
if settings.docs_version():
version = release = settings.docs_version()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Sort members by type
autodoc_member_order = 'groupwise'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
#'navbar_title': "Demo",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("Gallery", "docs/gallery"),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 3,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "false",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
#'bootswatch_theme': "united",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/bokeh_white_32.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
standard_sidebars = ['sidebartoc.html', 'sourcelink.html', 'searchbox.html']
html_sidebars = {
'*': standard_sidebars,
'docs/*': standard_sidebars,
'docs/dev_guide/**': standard_sidebars,
'docs/reference/**': standard_sidebars,
'docs/tutorials/**': standard_sidebars,
'docs/user_guide/**': standard_sidebars,
'docs/gallery': [],
'docs/gallery/*': [],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bokehdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Bokeh.tex', u'Bokeh Documentation',
u'Continuum Analytics', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bokeh', u'Bokeh Documentation',
[u'Continuum Analytics'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Bokeh', u'Bokeh Documentation',
u'Continuum Analytics', 'Bokeh', 'Interactive Web Plotting for Python',
'Graphics'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# intersphinx settings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
| bsd-3-clause |
lvapeab/nmt-keras | setup.py | 1 | 2100 | # -*- coding: utf-8 -*-
from setuptools import setup
setup(name='nmt_keras',
version='0.6',
description='Neural Machine Translation with Keras (Theano and Tensorflow).',
author='Marc Bolaños - Alvaro Peris',
author_email='[email protected]',
url='https://github.com/lvapeab/nmt-keras',
download_url='https://github.com/lvapeab/nmt-keras/archive/master.zip',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
"License :: OSI Approved :: MIT License"
],
install_requires=[
'cloudpickle',
'future',
'keras @ https://github.com/MarcBS/keras/archive/master.zip',
'keras_applications',
'keras_preprocessing',
'h5py',
'matplotlib',
'multimodal-keras-wrapper',
'numpy',
'scikit-image',
'scikit-learn',
'six',
'tables',
'numpy',
'pandas',
'sacrebleu',
'sacremoses',
'scipy',
'tensorflow<2'
],
package_dir={'nmt_keras': '.',
'nmt_keras.utils': 'utils',
'nmt_keras.data_engine': 'data_engine',
'nmt_keras.nmt_keras': 'nmt_keras',
'nmt_keras.demo-web': 'demo-web',
},
packages=['nmt_keras',
'nmt_keras.utils',
'nmt_keras.data_engine',
'nmt_keras.nmt_keras',
'nmt_keras.demo-web'
],
package_data={
'nmt_keras': ['examples/*']
}
)
| mit |
deroneriksson/incubator-systemml | src/main/python/tests/test_mllearn_df.py | 12 | 5320 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
sparkSession = SparkSession.builder.getOrCreate()
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
# Convert to DataFrame for i/o: current way to transfer data
logistic = LogisticRegression(sparkSession, transferUsingDF=True)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
        self.assertTrue(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95)  # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
        self.assertTrue(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95)  # We are comparable to a similar algorithm in scikit learn
def test_svm_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
        self.assertTrue(accuracy_score(sklearn_predicted, mllearn_predicted) > 0.95)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
anurag313/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; a Fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
    # the following packages depend on cblas, so they have to be built
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
hfegetude/EjerciciosMicroondas | tema3/ej7/parte4.py | 1 | 5836 | import numpy as np
import matplotlib.pyplot as plt
def cart2pol(x, y):
theta = np.arctan2(y, x)
rho = np.hypot(x, y)
return theta, rho
def pol2cart(theta, rho):
x = rho * np.cos(theta)
y = rho * np.sin(theta)
    return x, y
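# Sanity check for the two converters above (not part of the original script):
# cart2pol(1.0, 1.0) returns (pi/4, sqrt(2)), and pol2cart(*cart2pol(x, y))
# recovers (x, y) up to floating-point error.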
def add_radius(x, y, r ):
ang, mod = cart2pol(x, y)
return pol2cart( ang, mod + r)
def adjustFigAspect(fig,aspect=1):
'''
Adjust the subplot parameters so that the figure has the correct
aspect ratio.
'''
xsize,ysize = fig.get_size_inches()
minsize = min(xsize,ysize)
xlim = .4*minsize/xsize
ylim = .4*minsize/ysize
if aspect < 1:
xlim *= aspect
else:
ylim /= aspect
fig.subplots_adjust(left=.5-xlim,
right=.5+xlim,
bottom=.5-ylim,
top=.5+ylim)
def colision(r0, r1, p1x, p1y, p2x, p2y):
    # Intersection of two circles with radii r0 and r1 centred at (p1x, p1y)
    # and (p2x, p2y); returns the intersection point on one side of the line
    # joining the two centres.
    d = np.linalg.norm([p1x - p2x, p1y - p2y])
    a = (r0*r0 - r1*r1 + d*d)/(2*d)
    h = np.sqrt(r0*r0 - a*a)
    p3x = p1x + a*(p2x - p1x)/d
    p3y = p1y + a*(p2y - p1y)/d
    p4x = p3x - h*(p2y - p1y)/d
    p4y = p3y + h*(p2x - p1x)/d
    return p4x, p4y
def colisionM(r0, r1, p1x, p1y, p2x, p2y):
    # Same as colision(), but returns the intersection point on the opposite
    # side of the line joining the two centres.
    d = np.linalg.norm([p1x - p2x, p1y - p2y])
    a = (r0*r0 - r1*r1 + d*d)/(2*d)
    h = np.sqrt(r0*r0 - a*a)
    p3x = p1x + a*(p2x - p1x)/d
    p3y = p1y + a*(p2y - p1y)/d
    p4x = p3x + h*(p2y - p1y)/d
    p4y = p3y - h*(p2x - p1x)/d
    return p4x, p4y
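# Quick sanity check for the two intersection helpers (illustrative only):
# for two unit circles centred at (0, 0) and (1, 0), colision(1, 1, 0, 0, 1, 0)
# returns (0.5, +sqrt(3)/2) while colisionM(1, 1, 0, 0, 1, 0) returns the
# mirrored point (0.5, -sqrt(3)/2). They are used below to clip the
# constant-reactance arcs of the Smith chart at the unit circle.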
def line(i):
x = 1 + (1/i) * np.cos(np.arange(0 , 2*np.pi , 0.0001))
y = (1/(i))+(1/(i)) * np.sin(np.arange(0 , 2*np.pi , 0.0001))
x_t , y_t = colision(1, 1/i, 0, 0, 1, 1/i)
x_f = x[x < 1]
y_f = y[x < 1]
y_f = y_f[x_f > -1]
x_f = x_f[x_f > -1]
x_f = x_f[y_f < y_t ]
y_f = y_f[y_f < y_t ]
ax.plot(x_f, y_f , 'k', linewidth = 0.2)
x_text , y_text = add_radius(x_t, y_t, 0.01)
ax.text( x_text,
y_text,
str(i),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
##ax.plot(x_text, y_text, 'ko')
def line2(i):
x = 1 + (1/(-1*i)) * np.cos(np.arange( -np.pi , np.pi, 0.0001))
y = (1/(i*-1))+(1/(i*-1)) * np.sin(np.arange(-np.pi , np.pi, 0.0001))
x_t , y_t = colisionM(1, 1/i, 0, 0, 1, -1/i)
x_f = x[x < 1]
y_f = y[x < 1]
y_f = y_f[x_f > -1]
x_f = x_f[x_f > -1]
x_f = x_f[y_f > y_t ]
y_f = y_f[y_f > y_t ]
x_text , y_text = add_radius(x_t, y_t, 0.02)
ax.text( x_text,
y_text,
str(i),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
#ax.plot(x_t, y_t, 'ko')
ax.plot( x_f[20:] ,y_f[20:] , 'k', linewidth = 0.2)
def paint_line(i, ax):
x = i/(1+i) + (1/(1+i)) * np.cos(np.arange(0 , 2*np.pi , 0.001))
y = (1/(1+i)) * np.sin(np.arange(0 , 2*np.pi , 0.001))
ax.plot(x, y, 'k', linewidth = 0.2)
ax.text( 1-2*(1/(1+i)),
0.02,
str(i),
verticalalignment='bottom',
horizontalalignment='right',
rotation=90,
fontsize=3)
line(i)
line2(i)
def paint_text_degrees():
positions = np.arange(0, np.pi*2, 2*np.pi / 36)
for i, ang in enumerate(positions):
x_t , y_t = pol2cart(ang, 1.04)
ax.text( x_t,
y_t,
str(i*10),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
def paint_text_wavelength():
positions = np.arange(np.pi, 3*np.pi, 2*np.pi / 50)
for i, ang in enumerate(positions):
x_t , y_t = pol2cart(ang, 1.06)
ax.text( x_t,
y_t,
str(i/100),
verticalalignment='center',
horizontalalignment='center',
rotation=np.angle(x_t + y_t*1j, deg=True) - 90 ,
fontsize=3)
def imp2point(v1, v2):
reax = v1/(1+v1)
reay = 0
rear = (1/(1+v1))
imgx = 1
imgy = 1/v2
imgr = 1/v2
return colision(rear, imgr, reax, reay, imgx, imgy)
def move_wl(x, y , wl):
ax_ang, modulos = cart2pol(x, y)
ax_ang += 4*np.pi*wl
return pol2cart(ax_ang, modulos)
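# Worked example (illustrative): imp2point(1.4, 0.8) intersects the
# constant-resistance circle r = 1.4 with the constant-reactance circle
# x = 0.8, giving approximately (0.25, 0.25) -- the reflection coefficient of
# the normalised impedance z = 1.4 + 0.8j, since (z - 1)/(z + 1) = 0.25 + 0.25j.
# move_wl(px, py, wl) then rotates that point by 4*pi*wl radians about the
# chart centre, i.e. one full turn per half wavelength along the line.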
x_1= np.cos(np.arange(0 , 2*np.pi , 0.001))
y_1 = np.sin(np.arange(0, 2*np.pi, 0.001) )
fig = plt.figure()
adjustFigAspect(fig,aspect=1)
ax = fig.add_subplot(111)
ax.set_ylim(-1.01 , 1.01)
ax.set_xlim(-1.01, 1.01)
ax.axis('off')
ax.plot(x_1, y_1 , 'k', linewidth = 0.3)
#fig.axhline(y=0, xmin=-0.99, xmax=0.99, color='k', hold=None, linewidth = 0.5)
ax.plot([1, -1], [0, 0], 'k', linewidth = 0.3)
#ax.plot([0], [0], 'ko')
#black big lines
for i in np.arange(0.05, 0.2, 0.05):
paint_line(i , ax)
for i in np.arange(0.2, 1, 0.1):
paint_line(i , ax)
for i in np.arange(1, 2, 0.2):
paint_line(i , ax)
for i in np.arange(2, 5, 1):
paint_line(i , ax)
for i in np.array([5, 10, 20, 50]):
paint_line(i , ax)
paint_text_degrees()
paint_text_wavelength()
p1 , p2 = imp2point(1.4, 0.8)
ax.plot(p1, p2, 'ko')
start, modd= cart2pol(p1, p2)
print(start, modd)
p3, p4 = pol2cart(0,modd)
ax.plot(p3, p4, 'ko')
end, modd= cart2pol(p3, p4)
data_x = modd*np.cos(np.arange(start , end , -0.0001))
data_y = modd*np.sin(np.arange(start , end , -0.0001))
ax.plot(data_x, data_y)
p3, p4 = pol2cart(np.pi,modd)
ax.plot(p3, p4, 'ko')
end2, modd= cart2pol(p3, p4)
data_x = modd*np.cos(np.arange(0 , -np.pi , -0.0001))
data_y = modd*np.sin(np.arange(0 , -np.pi , -0.0001))
ax.plot(data_x, data_y)
fig.savefig('images/out4.pdf')
| gpl-3.0 |
zymsys/sms-tools | lectures/08-Sound-transformations/plots-code/stftMorph-frame.py | 21 | 2700 | import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
from scipy.signal import hamming, resample
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import math
(fs, x1) = UF.wavread('../../../sounds/orchestra.wav')
(fs, x2) = UF.wavread('../../../sounds/speech-male.wav')
w1 = np.hamming(1024)
N1 = 1024
H1 = 256
w2 = np.hamming(1024)
N2 = 1024
smoothf = .1                            # smoothing factor for the second sound's spectrum
balancef = .7                           # balance between the two sounds (1 = only the second sound)
M1 = w1.size # size of analysis window
hM1_1 = int(math.floor((M1+1)/2)) # half analysis window size by rounding
hM1_2 = int(math.floor(M1/2)) # half analysis window size by floor
M2 = w2.size # size of analysis window
hM2_1 = int(math.floor((M2+1)/2)) # half analysis window size by rounding
hM2_2 = int(math.floor(M2/2)) # half analysis window size by floor2
loc1 = 14843
loc2 = 9294
x1 = x1[loc1-hM1_1:loc1+hM1_2]
x2 = x2[loc2-hM2_1:loc2+hM2_2]
mX1, pX1 = DFT.dftAnal(x1, w1, N1) # compute dft
mX2, pX2 = DFT.dftAnal(x2, w2, N2) # compute dft
# morph
mX2smooth = resample(np.maximum(-200, mX2), int(mX2.size*smoothf))      # smooth spectrum of second sound (resample needs an integer length)
mX2 = resample(mX2smooth, mX2.size)
mY = balancef * mX2 + (1-balancef) * mX1 # generate output spectrum
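# i.e. mY[k] = balancef * smooth(mX2)[k] + (1 - balancef) * mX1[k]: the
# smoothed spectral envelope of the second sound is blended with the
# spectrum of the first one.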
#-----synthesis-----
y = DFT.dftSynth(mY, pX1, M1) * sum(w1)                  # synthesize the morphed frame and undo the window normalization
mY1, pY1 = DFT.dftAnal(y, w1, M1)                        # compute dft of the output frame (for plotting)
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N1)/float(fs), x1*w1, 'b', lw=1.5)
plt.axis([0, N1/float(fs), min(x1*w1), max(x1*w1)])
plt.title('x1 (orchestra.wav)')
plt.subplot(323)
plt.plot(fs*np.arange(mX1.size)/float(mX1.size), mX1-max(mX1), 'r', lw=1.5, label = 'mX1')
plt.plot(fs*np.arange(mX2.size)/float(mX2.size), mX2-max(mX2), 'k', lw=1.5, label='mX2')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-70,2])
plt.title('mX1 + mX2 (speech-male.wav)')
plt.subplot(325)
plt.plot(fs*np.arange(pX1.size)/float(pX1.size), pX1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX1),20])
plt.title('pX1')
plt.subplot(322)
plt.plot(np.arange(N1)/float(fs), y, 'b', lw=1.5)
plt.axis([0, float(N1)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1-max(mY1), 'r', lw=1.5)
plt.axis([0,fs/4.0,-70,2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY1.size)/float(pY1.size), pY1, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY1),6])
plt.title('pY')
plt.tight_layout()
plt.savefig('stftMorph-frame.png')
plt.show()
| agpl-3.0 |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/text.py | 10 | 82852 | """
Classes for including text in a figure.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
import math
import warnings
import contextlib
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import matplotlib.artist as artist
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, maxdict
from matplotlib import docstring
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch
from matplotlib.patches import FancyArrowPatch, Rectangle
import matplotlib.transforms as mtransforms
from matplotlib.transforms import Affine2D, Bbox, Transform
from matplotlib.transforms import BboxBase, BboxTransformTo
from matplotlib.lines import Line2D
from matplotlib.path import Path
from matplotlib.artist import allow_rasterization
from matplotlib.backend_bases import RendererBase
from matplotlib.textpath import TextPath
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
@contextlib.contextmanager
def _wrap_text(textobj):
"""
    Temporarily inserts newlines into the text if the wrap option is enabled.
"""
if textobj.get_wrap():
old_text = textobj.get_text()
try:
textobj.set_text(textobj._get_wrapped_text())
yield textobj
finally:
textobj.set_text(old_text)
else:
yield textobj
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float. The returned
angle is between 0 and 360 deg.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
try:
angle = float(rotation)
except (ValueError, TypeError):
isString = isinstance(rotation, six.string_types)
if ((isString and rotation == 'horizontal') or rotation is None):
angle = 0.
elif (isString and rotation == 'vertical'):
angle = 90.
else:
raise ValueError("rotation is {0} expected either 'horizontal'"
" 'vertical', numeric value or"
"None".format(rotation))
return angle % 360
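# A few illustrative values for get_rotation (not part of the original source):
#   get_rotation('horizontal') -> 0.0
#   get_rotation('vertical')   -> 90.0
#   get_rotation(450.0)        -> 90.0   (reduced modulo 360)
#   get_rotation(None)         -> 0.0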
# these are not available for the object inspector until after the
# class is build so we define an initial set here for the init
# function and they will be overridden after object defn
docstring.interpd.update(Text="""
========================== ================================================
Property Value
========================== ================================================
alpha float or None
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a
pad in points; if a boxstyle is supplied as
a string, then pad is instead a fraction
of the font size
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family ['serif' | 'sans-serif' | 'cursive' |
'fantasy' | 'monospace']
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties
instance
horizontalalignment or ha ['center' | 'right' | 'left']
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string e.g.,
['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
    rotation                   [ angle in degrees | 'vertical' | 'horizontal' ]
rotation_mode [ None | 'anchor']
size or fontsize [size in points | relative size e.g., 'smaller',
'x-large']
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
usetex [True | False | None]
variant ['normal' | 'small-caps']
verticalalignment or va ['center' | 'top' | 'bottom' | 'baseline' |
'center_baseline' ]
visible [True | False]
weight or fontweight ['normal' | 'bold' | 'heavy' | 'light' |
'ultrabold' | 'ultralight']
wrap [True | False]
x float
y float
zorder any number
========================== ===============================================
""")
# TODO : This function may move into the Text class as a method. As a
# matter of fact, The information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
    Calculate the bounding box of the text. Unlike
    :meth:`matplotlib.text.Text.get_extents`, the bbox size of
    the text *before* the rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = np.deg2rad(text.get_rotation())
tr = mtransforms.Affine2D().rotate(-theta)
_, parts, d = text._get_layout(renderer)
for t, wh, x, y in parts:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
yt1 -= d
xt2, yt2 = xt1 + w, yt1 + h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
_cached = maxdict(50)
def __str__(self):
return "Text(%g,%g,%s)" % (self._x, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='baseline',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
rotation_mode=None,
usetex=None, # defaults to rcParams['text.usetex']
wrap=False,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self._x, self._y = x, y
if color is None:
color = rcParams['text.color']
if fontproperties is None:
fontproperties = FontProperties()
elif is_string_like(fontproperties):
fontproperties = FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self.set_usetex(usetex)
self.set_wrap(wrap)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.set_rotation_mode(rotation_mode)
self.update(kwargs)
def update(self, kwargs):
"""
Update properties from a dictionary.
"""
bbox = kwargs.pop('bbox', None)
super(Text, self).update(kwargs)
if bbox:
self.set_bbox(bbox) # depends on font properties
def __getstate__(self):
d = super(Text, self).__getstate__()
# remove the cached _renderer (if it exists)
d['_renderer'] = None
return d
def contains(self, mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
if not self.get_visible() or self._renderer is None:
return False, {}
l, b, w, h = self.get_window_extent().bounds
r, t = l + w, b + h
x, y = mouseevent.x, mouseevent.y
inside = (l <= x <= r and b <= y <= t)
cattr = {}
# if the text has a surrounding patch, also check containment for it,
# and merge the results with the results for the text.
if self._bbox_patch:
patch_inside, patch_cattr = self._bbox_patch.contains(mouseevent)
inside = inside or patch_inside
cattr["bbox_patch"] = patch_cattr
return inside, cattr
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_unitless_position()
return self.get_transform().transform_point((x, y))
def _get_multialignment(self):
if self._multialignment is not None:
return self._multialignment
else:
return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def set_rotation_mode(self, m):
"""
        Set the text rotation mode. If "anchor", the un-rotated text
        is first aligned according to its *ha* and
        *va*, and then rotated with the alignment
        reference point as the origin. If None (default), the text is
        rotated first and then aligned.
"""
if m is None or m in ["anchor", "default"]:
self._rotation_mode = m
else:
raise ValueError("Unknown rotation_mode : %s" % repr(m))
self.stale = True
def get_rotation_mode(self):
"get text rotation mode"
return self._rotation_mode
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
self.stale = True
def _get_layout(self, renderer):
"""
return the extent (bbox) of the text together with
multiple-alignment information. Note that it returns an extent
of a rotated text when necessary.
"""
key = self.get_prop_tup(renderer=renderer)
if key in self._cached:
return self._cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self.get_text().split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, lp_h, lp_bl = renderer.get_text_width_height_descent('lp',
self._fontproperties,
ismath=False)
offsety = (lp_h - lp_bl) * self._linespacing
baseline = 0
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line, self.get_usetex())
if clean_line:
w, h, d = renderer.get_text_width_height_descent(clean_line,
self._fontproperties,
ismath=ismath)
else:
w, h, d = 0, 0, 0
# For multiline text, increase the line spacing when the
# text net-height(excluding baseline) is larger than that
# of a "l" (e.g., use of superscripts), which seems
# what TeX does.
h = max(h, lp_h)
d = max(d, lp_bl)
whs[i] = w, h
baseline = (h - d) - thisy
thisy -= max(offsety, (h - d) * self._linespacing)
horizLayout[i] = thisx, thisy, w, h
thisy -= d
width = max(width, w)
descent = d
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax - ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
        if len(lines) > 1:  # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width / 2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
cornersHoriz[:, 1] -= descent
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position offset the display
# bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
rotation_mode = self.get_rotation_mode()
if rotation_mode != "anchor":
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign == 'center':
offsetx = (xmin + width / 2.0)
elif halign == 'right':
offsetx = (xmin + width)
else:
offsetx = xmin
if valign == 'center':
offsety = (ymin + height / 2.0)
elif valign == 'top':
offsety = (ymin + height)
elif valign == 'baseline':
offsety = (ymin + height) - baseline
elif valign == 'center_baseline':
offsety = ymin + height - baseline / 2.0
else:
offsety = ymin
else:
xmin1, ymin1 = cornersHoriz[0]
xmax1, ymax1 = cornersHoriz[2]
if halign == 'center':
offsetx = (xmin1 + xmax1) / 2.0
elif halign == 'right':
offsetx = xmax1
else:
offsetx = xmin1
if valign == 'center':
offsety = (ymin1 + ymax1) / 2.0
elif valign == 'top':
offsety = ymax1
elif valign == 'baseline':
offsety = ymax1 - baseline
elif valign == 'center_baseline':
offsety = (ymin1 + ymax1 - baseline) / 2.0
else:
offsety = ymin1
offsetx, offsety = M.transform_point((offsetx, offsety))
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, list(zip(lines, whs, xs, ys)), descent
self._cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a FancyBboxPatch, e.g., facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
The default boxstyle is 'square'. The mutation
scale of the FancyBboxPatch is set to the fontsize.
ACCEPTS: FancyBboxPatch prop dict
"""
if rectprops is not None:
props = rectprops.copy()
boxstyle = props.pop("boxstyle", None)
pad = props.pop("pad", None)
if boxstyle is None:
boxstyle = "square"
if pad is None:
pad = 4 # points
pad /= self.get_size() # to fraction of font size
else:
if pad is None:
pad = 0.3
# boxstyle could be a callable or a string
if is_string_like(boxstyle) and "pad" not in boxstyle:
boxstyle += ",pad=%0.2f" % pad
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch(
(0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
else:
self._bbox_patch = None
self._update_clip_properties()
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_unitless_position here, which refers to text
# position in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0., w_box, h_box)
theta = np.deg2rad(self.get_rotation())
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx + x_box, posy + y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def _update_clip_properties(self):
clipprops = dict(clip_box=self.clipbox,
clip_path=self._clippath,
clip_on=self._clipon)
if self._bbox_patch:
bbox = self._bbox_patch.update(clipprops)
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
super(Text, self).set_clip_box(clipbox)
self._update_clip_properties()
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
* a :class:`~matplotlib.patches.Patch` (or subclass) instance
* a :class:`~matplotlib.path.Path` instance, in which case
an optional :class:`~matplotlib.transforms.Transform`
instance may be provided, which will be applied to the
path before using it for clipping.
* *None*, to remove the clipping path
For efficiency, if the path happens to be an axis-aligned
rectangle, this method will set the clipping box to the
corresponding rectangle and set the clipping path to *None*.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
super(Text, self).set_clip_path(path, transform)
self._update_clip_properties()
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
        When False, artists will be visible outside of the axes, which
        can lead to unexpected results.
ACCEPTS: [True | False]
"""
super(Text, self).set_clip_on(b)
self._update_clip_properties()
def get_wrap(self):
"""
Returns the wrapping state for the text.
"""
return self._wrap
def set_wrap(self, wrap):
"""
Sets the wrapping state for the text.
"""
self._wrap = wrap
def _get_wrap_line_width(self):
"""
Returns the maximum line width for wrapping text based on the
current orientation.
"""
x0, y0 = self.get_transform().transform(self.get_position())
figure_box = self.get_figure().get_window_extent()
# Calculate available width based on text alignment
alignment = self.get_horizontalalignment()
self.set_rotation_mode('anchor')
rotation = self.get_rotation()
left = self._get_dist_to_box(rotation, x0, y0, figure_box)
right = self._get_dist_to_box(
(180 + rotation) % 360,
x0,
y0,
figure_box)
if alignment == 'left':
line_width = left
elif alignment == 'right':
line_width = right
else:
line_width = 2 * min(left, right)
return line_width
def _get_dist_to_box(self, rotation, x0, y0, figure_box):
"""
Returns the distance from the given points, to the boundaries
of a rotated box in pixels.
"""
if rotation > 270:
quad = rotation - 270
h1 = y0 / math.cos(math.radians(quad))
h2 = (figure_box.x1 - x0) / math.cos(math.radians(90 - quad))
elif rotation > 180:
quad = rotation - 180
h1 = x0 / math.cos(math.radians(quad))
h2 = y0 / math.cos(math.radians(90 - quad))
elif rotation > 90:
quad = rotation - 90
h1 = (figure_box.y1 - y0) / math.cos(math.radians(quad))
h2 = x0 / math.cos(math.radians(90 - quad))
else:
h1 = (figure_box.x1 - x0) / math.cos(math.radians(rotation))
h2 = (figure_box.y1 - y0) / math.cos(math.radians(90 - rotation))
return min(h1, h2)
def _get_rendered_text_width(self, text):
"""
Returns the width of a given text string, in pixels.
"""
w, h, d = self._renderer.get_text_width_height_descent(
text,
self.get_fontproperties(),
False)
return math.ceil(w)
def _get_wrapped_text(self):
"""
Return a copy of the text with new lines added, so that
the text is wrapped relative to the parent figure.
"""
# Not fit to handle breaking up latex syntax correctly, so
# ignore latex for now.
if self.get_usetex():
return self.get_text()
# Build the line incrementally, for a more accurate measure of length
line_width = self._get_wrap_line_width()
wrapped_str = ""
line = ""
for word in self.get_text().split(' '):
            # New lines in the user's text need to force a split, so that it's
# not using the longest current line width in the line being built
sub_words = word.split('\n')
for i in range(len(sub_words)):
current_width = self._get_rendered_text_width(
line + ' ' + sub_words[i])
# Split long lines, and each newline found in the current word
if current_width > line_width or i > 0:
wrapped_str += line + '\n'
line = ""
if line == "":
line = sub_words[i]
else:
line += ' ' + sub_words[i]
return wrapped_str + line
@allow_rasterization
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if self.get_text().strip() == '':
return
renderer.open_group('text', self.get_gid())
with _wrap_text(self) as textobj:
bbox, info, descent = textobj._get_layout(renderer)
trans = textobj.get_transform()
# don't use textobj.get_position here, which refers to text
# position in Text, and dash position in TextWithDash:
posx = float(textobj.convert_xunits(textobj._x))
posy = float(textobj.convert_yunits(textobj._y))
if not np.isfinite(posx) or not np.isfinite(posy):
raise ValueError("posx and posy should be finite values")
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if textobj._bbox_patch:
textobj._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(textobj.get_color())
gc.set_alpha(textobj.get_alpha())
gc.set_url(textobj._url)
textobj._set_gc_clip(gc)
angle = textobj.get_rotation()
for line, wh, x, y in info:
mtext = textobj if len(info) == 1 else None
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash - y
clean_line, ismath = textobj.is_math_text(line,
self.get_usetex())
if textobj.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
textrenderer = PathEffectRenderer(
textobj.get_path_effects(), renderer)
else:
textrenderer = renderer
if textobj.get_usetex():
textrenderer.draw_tex(gc, x, y, clean_line,
textobj._fontproperties, angle,
mtext=mtext)
else:
textrenderer.draw_text(gc, x, y, clean_line,
textobj._fontproperties, angle,
ismath=ismath, mtext=mtext)
gc.restore()
renderer.close_group('text')
self.stale = False
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_unitless_position(self):
"Return the unitless position of the text as a tuple (*x*, *y*)"
# This will get the position with all unit information stripped away.
        # This is here for convenience since it is done in several locations.
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
        # This should return the same data (possibly unitized) as was
# specified with 'set_x' and 'set_y'.
return self._x, self._y
def get_prop_tup(self, renderer=None):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (e.g., layouts) and
need to know if the text has changed.
"""
x, y = self.get_unitless_position()
return (x, y, self.get_text(), self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties),
self._rotation, self._rotation_mode,
self.figure.dpi, id(renderer or self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible():
return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self.get_text().strip() == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx, ty, 0, 0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info, descent = self._get_layout(self._renderer)
x, y = self.get_unitless_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
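    # Illustrative usage (commented out; not part of the original source).
    # The renderer only exists after a draw, so a typical pattern is:
    #
    #   fig.canvas.draw()
    #   bbox = some_text.get_window_extent(renderer=fig.canvas.get_renderer())
    #   width_px, height_px = bbox.width, bbox.height
    #
    # `fig.canvas.get_renderer()` is assumed to be available (true for the
    # Agg-based backends); otherwise pass the renderer used for drawing.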
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
To change the position of the bounding box.
ACCEPTS: any matplotlib color
"""
if self._bbox_patch is None:
self.set_bbox(dict(facecolor=color, edgecolor=color))
else:
self._bbox_patch.update(dict(facecolor=color))
self._update_clip_properties()
self.stale = True
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
self.stale = True
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._horizontalalignment = align
self.stale = True
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
        bounding box of all the lines is determined by the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' %
str(legal))
self._multialignment = align
self.stale = True
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
self.stale = True
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' |
'monospace' ]
"""
self._fontproperties.set_family(fontname)
self.stale = True
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
self.stale = True
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
self.stale = True
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [size in points | 'xx-small' | 'x-small' | 'small' |
'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
self.stale = True
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' |
'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' |
'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
self.stale = True
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [a numeric value in range 0-1000 | 'ultra-condensed' |
'extra-condensed' | 'condensed' | 'semi-condensed' |
'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' |
'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
self.stale = True
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
self.stale = True
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' %
str(legal))
self._verticalalignment = align
self.stale = True
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
self.stale = True
@staticmethod
def is_math_text(s, usetex=None):
"""
Returns a cleaned string and a boolean flag.
The flag indicates if the given string *s* contains any mathtext,
determined by counting unescaped dollar signs. If no mathtext
is present, the cleaned string has its dollar signs unescaped.
If usetex is on, the flag always has the value "TeX".
"""
# Did we find an even number of non-escaped dollar signs?
        # If so, treat it as math text.
if usetex is None:
usetex = rcParams['text.usetex']
if usetex:
if s == ' ':
s = r'\ '
return s, 'TeX'
if cbook.is_math_text(s):
return s, True
else:
return s.replace(r'\$', '$'), False
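    # Illustrative values (assuming rcParams['text.usetex'] is False):
    #   Text.is_math_text(r'$x^2$')      -> ('$x^2$', True)
    #   Text.is_math_text(r'cost: \$5')  -> ('cost: $5', False)
    # With usetex enabled the flag is always the string 'TeX'.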
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
self.stale = True
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
def set_usetex(self, usetex):
"""
Set this `Text` object to render using TeX (or not).
If `None` is given, the option will be reset to use the value of
`rcParams['text.usetex']`
"""
if usetex is None:
self._usetex = rcParams['text.usetex']
else:
self._usetex = bool(usetex)
self.stale = True
def get_usetex(self):
"""
Return whether this `Text` object will render using TeX.
If the user has not manually set this value, it will default to
the value of `rcParams['text.usetex']`
"""
if self._usetex is None:
return rcParams['text.usetex']
else:
return self._usetex
docstring.interpd.update(Text=artist.kwdoc(Text))
docstring.dedent_interpd(Text.__init__)
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (i.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)" % (self._x, self._y, repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_unitless_position(self):
"Return the unitless position of the text as a tuple (*x*, *y*)"
# This will get the position with all unit information stripped away.
        # This is here for convenience since it is done in several locations.
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
# This should return the same data (possibly unitized) as was
# specified with set_x and set_y
return self._dashx, self._dashy
def get_prop_tup(self, renderer=None):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (e.g., layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self, renderer=renderer)]
props.extend([self._x, self._y, self._dashlength,
self._dashdirection, self._dashrotation, self._dashpad,
self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
self.stale = False
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_unitless_position()
dashlength = self.get_dashlength()
        # Short-circuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi * (angle / 180.0 + dashdirection - 1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy + dashpush * cd
c2 = cxy + (dashpush + dashlength) * cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta / cos_theta
dx = w
dy = w * tan_theta
if dy > h or dy < -h:
dy = h
dx = h / tan_theta
cwd = np.array([dx, dy]) / 2
cwd *= 1 + dashpad / np.sqrt(np.dot(cwd, cwd))
cw = c2 + (dashdirection * 2 - 1) * cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
self.stale = True
def get_dashdirection(self):
"""
        Get the direction of the dash: 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash following the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
self.stale = True
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
self.stale = True
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
self.stale = True
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
self.stale = True
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
self.stale = True
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
self.stale = True
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
self.stale = True
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
        Set the figure instance the artist belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
docstring.interpd.update(TextWithDash=artist.kwdoc(TextWithDash))
class OffsetFrom(object):
'Callable helper class for working with `Annotation`'
def __init__(self, artist, ref_coord, unit="points"):
'''
Parameters
----------
artist : `Artist`, `BboxBase`, or `Transform`
The object to compute the offset from.
ref_coord : length 2 sequence
            If `artist` is an `Artist` or `BboxBase`, this value is
            the location of the offset origin in fractions of the
`artist` bounding box.
If `artist` is a transform, the offset origin is the
transform applied to this value.
        unit : {'points', 'pixels'}
The screen units to use (pixels or points) for the offset
input.
'''
self._artist = artist
self._ref_coord = ref_coord
self.set_unit(unit)
def set_unit(self, unit):
'''
The unit for input to the transform used by ``__call__``
Parameters
----------
unit : {'points', 'pixels'}
'''
if unit not in ["points", "pixels"]:
raise ValueError("'unit' must be one of [ 'points' | 'pixels' ]")
self._unit = unit
def get_unit(self):
'The unit for input to the transform used by ``__call__``'
return self._unit
def _get_scale(self, renderer):
unit = self.get_unit()
if unit == "pixels":
return 1.
else:
return renderer.points_to_pixels(1.)
def __call__(self, renderer):
'''
Return the offset transform.
Parameters
----------
renderer : `RendererBase`
The renderer to use to compute the offset
Returns
-------
transform : `Transform`
Maps (x, y) in pixel or point units to screen units
relative to the given artist.
'''
if isinstance(self._artist, Artist):
bbox = self._artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
xf, yf = self._ref_coord
x, y = l + w * xf, b + h * yf
elif isinstance(self._artist, BboxBase):
l, b, w, h = self._artist.bounds
xf, yf = self._ref_coord
x, y = l + w * xf, b + h * yf
elif isinstance(self._artist, Transform):
x, y = self._artist.transform_point(self._ref_coord)
else:
raise RuntimeError("unknown type")
sc = self._get_scale(renderer)
tr = Affine2D().scale(sc, sc).translate(x, y)
return tr
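# Hedged usage sketch (not part of the original module): OffsetFrom is
# typically passed as the ``textcoords`` of a second annotation so that its
# text is placed at an offset, in points, from an existing artist. The ``ax``
# and ``ann1`` names below are hypothetical.
#
# ann1 = ax.annotate("anchor", xy=(0.5, 0.5), xycoords="data")
# ann2 = ax.annotate("follower", xy=(0.5, 0.5), xycoords="data",
#                    xytext=(30, 0), textcoords=OffsetFrom(ann1, (1, 0)))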
class _AnnotationBase(object):
def __init__(self,
xy,
xycoords='data',
annotation_clip=None):
self.xy = xy
self.xycoords = xycoords
self.set_annotation_clip(annotation_clip)
self._draggable = None
def _get_xy(self, renderer, x, y, s):
if isinstance(s, tuple):
s1, s2 = s
else:
s1, s2 = s, s
if s1 == 'data':
x = float(self.convert_xunits(x))
if s2 == 'data':
y = float(self.convert_yunits(y))
tr = self._get_xy_transform(renderer, s)
x1, y1 = tr.transform_point((x, y))
return x1, y1
def _get_xy_transform(self, renderer, s):
if isinstance(s, tuple):
s1, s2 = s
from matplotlib.transforms import blended_transform_factory
tr1 = self._get_xy_transform(renderer, s1)
tr2 = self._get_xy_transform(renderer, s2)
tr = blended_transform_factory(tr1, tr2)
return tr
if six.callable(s):
tr = s(renderer)
if isinstance(tr, BboxBase):
return BboxTransformTo(tr)
elif isinstance(tr, Transform):
return tr
else:
raise RuntimeError("unknown return type ...")
if isinstance(s, Artist):
bbox = s.get_window_extent(renderer)
return BboxTransformTo(bbox)
elif isinstance(s, BboxBase):
return BboxTransformTo(s)
elif isinstance(s, Transform):
return s
elif not is_string_like(s):
raise RuntimeError("unknown coordinate type : %s" % (s,))
if s == 'data':
return self.axes.transData
elif s == 'polar':
from matplotlib.projections import PolarAxes
tr = PolarAxes.PolarTransform()
trans = tr + self.axes.transData
return trans
s_ = s.split()
if len(s_) != 2:
raise ValueError("%s is not a recognized coordinate" % s)
bbox0, xy0 = None, None
bbox_name, unit = s_
# if unit is offset-like
if bbox_name == "figure":
bbox0 = self.figure.bbox
elif bbox_name == "axes":
bbox0 = self.axes.bbox
# elif bbox_name == "bbox":
# if bbox is None:
# raise RuntimeError("bbox is specified as a coordinate but "
# "never set")
# bbox0 = self._get_bbox(renderer, bbox)
if bbox0 is not None:
xy0 = bbox0.bounds[:2]
elif bbox_name == "offset":
xy0 = self._get_ref_xy(renderer)
if xy0 is not None:
# reference x, y in display coordinate
ref_x, ref_y = xy0
from matplotlib.transforms import Affine2D
if unit == "points":
# dots per points
dpp = self.figure.get_dpi() / 72.
tr = Affine2D().scale(dpp, dpp)
elif unit == "pixels":
tr = Affine2D()
elif unit == "fontsize":
fontsize = self.get_size()
dpp = fontsize * self.figure.get_dpi() / 72.
tr = Affine2D().scale(dpp,
dpp)
elif unit == "fraction":
w, h = bbox0.bounds[2:]
tr = Affine2D().scale(w, h)
else:
raise ValueError("%s is not a recognized coordinate" % s)
return tr.translate(ref_x, ref_y)
else:
raise ValueError("%s is not a recognized coordinate" % s)
def _get_ref_xy(self, renderer):
"""
return x, y (in display coordinate) that is to be used for a reference
of any offset coordinate
"""
if isinstance(self.xycoords, tuple):
s1, s2 = self.xycoords
if ((is_string_like(s1) and s1.split()[0] == "offset") or
(is_string_like(s2) and s2.split()[0] == "offset")):
raise ValueError("xycoords should not be an offset coordinate")
x, y = self.xy
x1, y1 = self._get_xy(renderer, x, y, s1)
x2, y2 = self._get_xy(renderer, x, y, s2)
return x1, y2
elif (is_string_like(self.xycoords) and
self.xycoords.split()[0] == "offset"):
raise ValueError("xycoords should not be an offset coordinate")
else:
x, y = self.xy
return self._get_xy(renderer, x, y, self.xycoords)
#raise RuntimeError("must be defined by the derived class")
# def _get_bbox(self, renderer):
# if hasattr(bbox, "bounds"):
# return bbox
# elif hasattr(bbox, "get_window_extent"):
# bbox = bbox.get_window_extent()
# return bbox
# else:
# raise ValueError("A bbox instance is expected but got %s" %
# str(bbox))
def set_annotation_clip(self, b):
"""
Set the *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside
the axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def _get_position_xy(self, renderer):
"Return the pixel position of the annotated point."
x, y = self.xy
return self._get_xy(renderer, x, y, self.xycoords)
def _check_xy(self, renderer, xy_pixel):
"""
Given the xy pixel coordinate, check whether the annotation needs to
be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.xycoords == "data"):
# check if self.xy is inside the axes.
if not self.axes.contains_point(xy_pixel):
return False
return True
def draggable(self, state=None, use_blit=False):
"""
Set the draggable state -- if state is
* None : toggle the current state
* True : turn draggable on
* False : turn draggable off
If draggable is on, you can drag the annotation on the canvas with
the mouse. The DraggableAnnotation helper instance is returned if
draggable is on.
"""
from matplotlib.offsetbox import DraggableAnnotation
is_draggable = self._draggable is not None
# if state is None we'll toggle
if state is None:
state = not is_draggable
if state:
if self._draggable is None:
self._draggable = DraggableAnnotation(self, use_blit)
else:
if self._draggable is not None:
self._draggable.disconnect()
self._draggable = None
return self._draggable
class Annotation(Text, _AnnotationBase):
def __str__(self):
return "Annotation(%g,%g,%s)" % (self.xy[0],
self.xy[1],
repr(self._text))
@docstring.dedent_interpd
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
annotation_clip=None,
**kwargs):
'''
Annotate the point ``xy`` with text ``s``.
Additional kwargs are passed to `~matplotlib.text.Text`.
Parameters
----------
s : str
The text of the annotation
xy : iterable
Length 2 sequence specifying the *(x,y)* point to annotate
xytext : iterable, optional
Length 2 sequence specifying the *(x,y)* to place the text
at. If None, defaults to ``xy``.
xycoords : str, Artist, Transform, callable or tuple, optional
The coordinate system that ``xy`` is given in.
For a `str` the allowed values are:
================= ===============================================
Property Description
================= ===============================================
'figure points' points from the lower left of the figure
'figure pixels' pixels from the lower left of the figure
'figure fraction' fraction of figure from lower left
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' fraction of axes from lower left
'data' use the coordinate system of the object being
annotated (default)
'polar' *(theta,r)* if not native 'data' coordinates
================= ===============================================
If a `~matplotlib.artist.Artist` object is passed in, the units are
fractions of its bounding box.
If a `~matplotlib.transforms.Transform` object is passed
in use that to transform ``xy`` to screen coordinates
If a callable it must take a
`~matplotlib.backend_bases.RendererBase` object as input
and return a `~matplotlib.transforms.Transform` or
`~matplotlib.transforms.Bbox` object
If a `tuple` must be length 2 tuple of str, `Artist`,
`Transform` or callable objects. The first transform is
used for the *x* coordinate and the second for *y*.
See :ref:`plotting-guide-annotation` for more details.
Defaults to ``'data'``
textcoords : str, `Artist`, `Transform`, callable or tuple, optional
The coordinate system that ``xytext`` is given in, which
may be different from the coordinate system used for
``xy``.
All ``xycoords`` values are valid as well as the following
strings:
================= =========================================
Property Description
================= =========================================
'offset points' offset (in points) from the *xy* value
'offset pixels' offset (in pixels) from the *xy* value
================= =========================================
defaults to the input of ``xycoords``
arrowprops : dict, optional
If not None, properties used to draw a
`~matplotlib.patches.FancyArrowPatch` arrow between ``xy`` and
``xytext``.
If `arrowprops` does not contain the key ``'arrowstyle'`` the
allowed keys are:
========== ======================================================
Key Description
========== ======================================================
width the width of the arrow in points
headwidth the width of the base of the arrow head in points
headlength the length of the arrow head in points
shrink fraction of total length to 'shrink' from both ends
? any key to :class:`matplotlib.patches.FancyArrowPatch`
========== ======================================================
If the `arrowprops` contains the key ``'arrowstyle'`` the
above keys are forbidden. The allowed values of
``'arrowstyle'`` are:
============ =============================================
Name Attrs
============ =============================================
``'-'`` None
``'->'`` head_length=0.4,head_width=0.2
``'-['`` widthB=1.0,lengthB=0.2,angleB=None
``'|-|'`` widthA=1.0,widthB=1.0
``'-|>'`` head_length=0.4,head_width=0.2
``'<-'`` head_length=0.4,head_width=0.2
``'<->'`` head_length=0.4,head_width=0.2
``'<|-'`` head_length=0.4,head_width=0.2
``'<|-|>'`` head_length=0.4,head_width=0.2
``'fancy'`` head_length=0.4,head_width=0.4,tail_width=0.4
``'simple'`` head_length=0.5,head_width=0.5,tail_width=0.2
``'wedge'`` tail_width=0.3,shrink_factor=0.5
============ =============================================
Valid keys for `~matplotlib.patches.FancyArrowPatch` are:
=============== ==================================================
Key Description
=============== ==================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ==================================================
Defaults to None
annotation_clip : bool, optional
Controls the visibility of the annotation when it goes
outside the axes area.
If `True`, the annotation will only be drawn when the
``xy`` is inside the axes. If `False`, the annotation will
always be drawn regardless of its position.
The default is `None`, which behaves as `True` only if
*xycoords* is "data".
Returns
-------
Annotation
'''
_AnnotationBase.__init__(self,
xy,
xycoords=xycoords,
annotation_clip=annotation_clip)
# warn about wonky input data
if (xytext is None and
textcoords is not None and
textcoords != xycoords):
warnings.warn("You have used the `textcoords` kwarg, but not "
"the `xytext` kwarg. This can lead to surprising "
"results.")
# clean up textcoords and assign default
if textcoords is None:
textcoords = self.xycoords
self._textcoords = textcoords
# cleanup xytext defaults
if xytext is None:
xytext = self.xy
x, y = xytext
Text.__init__(self, x, y, s, **kwargs)
self.arrowprops = arrowprops
self.arrow = None
if arrowprops:
if "arrowstyle" in arrowprops:
arrowprops = self.arrowprops.copy()
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
else:
# modified YAArrow API to be used with FancyArrowPatch
shapekeys = ('width', 'headwidth', 'headlength',
'shrink', 'frac')
arrowprops = dict()
for key, val in self.arrowprops.items():
if key not in shapekeys:
arrowprops[key] = val # basic Patch properties
self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
**arrowprops)
else:
self.arrow_patch = None
def contains(self, event):
contains, tinfo = Text.contains(self, event)
if self.arrow is not None:
in_arrow, _ = self.arrow.contains(event)
contains = contains or in_arrow
if self.arrow_patch is not None:
in_patch, _ = self.arrow_patch.contains(event)
contains = contains or in_patch
return contains, tinfo
@property
def xyann(self):
return self.get_position()
@xyann.setter
def xyann(self, xytext):
self.set_position(xytext)
@property
def anncoords(self):
return self._textcoords
@anncoords.setter
def anncoords(self, coords):
self._textcoords = coords
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def update_positions(self, renderer):
""""Update the pixel positions of the annotated point and the
text.
"""
xy_pixel = self._get_position_xy(renderer)
self._update_position_xytext(renderer, xy_pixel)
def _update_position_xytext(self, renderer, xy_pixel):
"""Update the pixel positions of the annotation text and the arrow
patch.
"""
# generate transformation,
self.set_transform(self._get_xy_transform(renderer, self.anncoords))
ox0, oy0 = self._get_xy_display()
ox1, oy1 = xy_pixel
if self.arrowprops:
x0, y0 = xy_pixel
l, b, w, h = Text.get_window_extent(self, renderer).bounds
r = l + w
t = b + h
xc = 0.5 * (l + r)
yc = 0.5 * (b + t)
d = self.arrowprops.copy()
ms = d.pop("mutation_scale", self.get_size())
self.arrow_patch.set_mutation_scale(ms)
if "arrowstyle" not in d:
# Approximately simulate the YAArrow.
# Pop its kwargs:
shrink = d.pop('shrink', 0.0)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
# Ignore frac--it is useless.
frac = d.pop('frac', None)
if frac is not None:
warnings.warn(
"'frac' option in 'arrowprops' is no longer supported;"
" use 'headlength' to set the head length in points.")
headlength = d.pop('headlength', 12)
# NB: ms is in pts
stylekw = dict(head_length=headlength / ms,
head_width=headwidth / ms,
tail_width=width / ms)
self.arrow_patch.set_arrowstyle('simple', **stylekw)
# using YAArrow style:
# pick the x,y corner of the text bbox closest to point
# annotated
xpos = ((l, 0), (xc, 0.5), (r, 1))
ypos = ((b, 0), (yc, 0.5), (t, 1))
dsu = [(abs(val[0] - x0), val) for val in xpos]
dsu.sort()
_, (x, relposx) = dsu[0]
dsu = [(abs(val[0] - y0), val) for val in ypos]
dsu.sort()
_, (y, relposy) = dsu[0]
self._arrow_relpos = (relposx, relposy)
r = np.hypot((y - y0), (x - x0))
shrink_pts = shrink * r / renderer.points_to_pixels(1)
self.arrow_patch.shrinkA = shrink_pts
self.arrow_patch.shrinkB = shrink_pts
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted for.
relpos = self._arrow_relpos
bbox = Text.get_window_extent(self, renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1, oy1))
if "patchA" in d:
self.arrow_patch.set_patchA(d.pop("patchA"))
else:
if self._bbox_patch:
self.arrow_patch.set_patchA(self._bbox_patch)
else:
pad = renderer.points_to_pixels(4)
if self.get_text().strip() == "":
self.arrow_patch.set_patchA(None)
return
bbox = Text.get_window_extent(self, renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on(False)
self.arrow_patch.set_patchA(r)
@allow_rasterization
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self._update_position_xytext(renderer, xy_pixel)
self.update_bbox_position_size(renderer)
if self.arrow_patch is not None: # FancyArrowPatch
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
# Draw text, including FancyBboxPatch, after FancyArrowPatch.
# Otherwise, a wedge arrowstyle can land partly on top of the Bbox.
Text.draw(self, renderer)
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text and arrow annotation, in display units.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure. The
*dpi* used defaults to self.figure.dpi; the renderer dpi is
irrelevant.
'''
if not self.get_visible():
return Bbox.unit()
arrow = self.arrow
arrow_patch = self.arrow_patch
text_bbox = Text.get_window_extent(self, renderer=renderer)
bboxes = [text_bbox]
if self.arrow is not None:
bboxes.append(arrow.get_window_extent(renderer=renderer))
elif self.arrow_patch is not None:
bboxes.append(arrow_patch.get_window_extent(renderer=renderer))
return Bbox.union(bboxes)
docstring.interpd.update(Annotation=Annotation.__init__.__doc__)
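# Hedged usage sketch (illustrative only, not part of the module): the common
# pattern described in the ``Annotation`` docstring -- ``xy`` in data
# coordinates, ``xytext`` as an offset in points, and an arrowstyle-based
# arrow. ``ax`` and the data values are hypothetical.
#
# ax.annotate('local maximum', xy=(3.14, 1.0), xycoords='data',
#             xytext=(20, 20), textcoords='offset points',
#             arrowprops=dict(arrowstyle='->'))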
| apache-2.0 |
pld/bamboo | bamboo/models/observation.py | 2 | 13221 | from math import ceil
from pandas import concat, DataFrame
from pymongo.errors import AutoReconnect
from bamboo.core.frame import add_id_column, DATASET_ID, INDEX
from bamboo.lib.datetools import now, parse_timestamp_query
from bamboo.lib.mongo import MONGO_ID, MONGO_ID_ENCODED
from bamboo.lib.parsing import parse_columns
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.utils import combine_dicts, invert_dict, replace_keys
from bamboo.models.abstract_model import AbstractModel
def add_index(df):
"""Add an encoded index to this DataFrame."""
if not INDEX in df.columns:
# No index, create index for this dframe.
if not 'index' in df.columns:
# Custom index not supplied, use pandas default index.
df.reset_index(inplace=True)
df.rename(columns={'index': INDEX}, inplace=True)
return df
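# Illustrative example (column names are made up): a dframe with a default
# pandas RangeIndex and columns ['amount', 'rating'] comes back from
# add_index() with an extra INDEX column holding the former index values,
# i.e. columns [INDEX, 'amount', 'rating'] with INDEX == [0, 1, 2, ...].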
def encode(dframe, dataset, append_index=True):
"""Encode the columns for `dataset` to slugs and add ID column.
The ID column is the dataset_id for dataset. This is
used to link observations to a specific dataset.
:param dframe: The DataFrame to encode.
:param dataset: The Dataset to use a mapping for.
:param append_index: Add index to the DataFrame, default True.
:returns: A modified `dframe`.
"""
if append_index:
dframe = add_index(dframe)
dframe = add_id_column(dframe, dataset.dataset_id)
encoded_columns_map = dataset.schema.rename_map_for_dframe(dframe)
return dframe.rename(columns=encoded_columns_map)
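# Hedged sketch of the full encode() flow (names are illustrative): for a
# dataset with dataset_id 'abc123' and a single column 'rating', the dframe
# first gains INDEX and DATASET_ID columns and then has its labels renamed via
# the dataset's schema map, so a stored row looks roughly like
# {DATASET_ID: 'abc123', INDEX: 0, 'rating_slug': 5}.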
def update_calculations(record, dataset):
calculations = dataset.calculations(include_aggs=False)
if len(calculations):
dframe = DataFrame(data=record, index=[0])
labels_to_slugs = dataset.schema.labels_to_slugs
for c in calculations:
columns = parse_columns(dataset, c.formula, c.name, dframe=dframe)
record[labels_to_slugs[c.name]] = columns[0][0]
return record
class Observation(AbstractModel):
__collectionname__ = 'observations'
DELETED_AT = '-1' # use a short code for key
ENCODING = 'enc'
ENCODING_DATASET_ID = '%s_%s' % (DATASET_ID, ENCODING)
@classmethod
def delete(cls, dataset, index):
"""Delete observation at index for dataset.
:param dataset: The dataset to delete the observation from.
:param index: The index of the observation to delete.
"""
query = {INDEX: index, DATASET_ID: dataset.dataset_id}
query = cls.encode(query, dataset=dataset)
cls.__soft_delete(query)
@classmethod
def delete_all(cls, dataset, query=None):
"""Delete the observations for `dataset`.
:param dataset: The dataset to delete observations for.
:param query: An optional query to restrict deletion.
"""
query = query or {}
query.update({DATASET_ID: dataset.dataset_id})
query = cls.encode(query, dataset=dataset)
super(cls, cls()).delete(query)
@classmethod
def delete_columns(cls, dataset, columns):
"""Delete a column from the dataset."""
encoding = cls.encoding(dataset)
cls.unset({cls.ENCODING_DATASET_ID: dataset.dataset_id},
{"%s.%s" % (cls.ENCODING, c): 1 for c in columns})
cls.unset(
cls.encode({DATASET_ID: dataset.dataset_id}, encoding=encoding),
cls.encode({c: 1 for c in columns}, encoding=encoding))
@classmethod
def delete_encoding(cls, dataset):
query = {cls.ENCODING_DATASET_ID: dataset.dataset_id}
super(cls, cls()).delete(query)
@classmethod
def encoding(cls, dataset, encoded_dframe=None):
record = super(cls, cls).find_one({
cls.ENCODING_DATASET_ID: dataset.dataset_id}).record
if record is None and encoded_dframe is not None:
encoding = cls.__make_encoding(encoded_dframe)
cls.__store_encoding(dataset, encoding)
return cls.encoding(dataset)
return record[cls.ENCODING] if record else None
@classmethod
def encode(cls, dict_, dataset=None, encoding=None):
if dataset:
encoding = cls.encoding(dataset)
return replace_keys(dict_, encoding) if encoding else dict_
@classmethod
def decoding(cls, dataset):
return invert_dict(cls.encoding(dataset))
@classmethod
def find(cls, dataset, query_args=None, as_cursor=False,
include_deleted=False):
"""Return observation rows matching parameters.
:param dataset: Dataset to return rows for.
:param include_deleted: If True, return deleted records; default False.
:param query_args: An optional QueryArgs to hold the query arguments.
:raises: `JSONError` if the query could not be parsed.
:returns: A list of dictionaries matching the passed in `query` and
other parameters.
"""
encoding = cls.encoding(dataset) or {}
query_args = query_args or QueryArgs()
query_args.query = parse_timestamp_query(query_args.query,
dataset.schema)
query_args.encode(encoding, {DATASET_ID: dataset.dataset_id})
if not include_deleted:
query = query_args.query
query[cls.DELETED_AT] = 0
query_args.query = query
# exclude deleted at column
query_args.select = query_args.select or {cls.DELETED_AT: 0}
distinct = query_args.distinct
records = super(cls, cls).find(query_args, as_dict=True,
as_cursor=(as_cursor or distinct))
return records.distinct(encoding.get(distinct, distinct)) if distinct\
else records
@classmethod
def update_from_dframe(cls, df, dataset):
dataset.build_schema(df)
encoded_dframe = encode(df.reset_index(), dataset, append_index=False)
encoding = cls.encoding(dataset)
cls.__batch_update(encoded_dframe, encoding)
cls.__store_encoding(dataset, encoding)
dataset.update_stats(df, update=True)
@classmethod
def find_one(cls, dataset, index, decode=True):
"""Return row by index.
:param dataset: The dataset to find the row for.
:param index: The index of the row to find.
"""
query = {INDEX: index, DATASET_ID: dataset.dataset_id,
cls.DELETED_AT: 0}
query = cls.encode(query, dataset=dataset)
decoding = cls.decoding(dataset)
record = super(cls, cls).find_one(query, as_dict=True)
return cls(cls.encode(record, encoding=decoding) if decode else record)
@classmethod
def append(cls, dframe, dataset):
"""Append an additional dframe to an existing dataset.
:params dframe: The DataFrame to append.
:params dataset: The DataSet to add `dframe` to.
"""
encoded_dframe = encode(dframe, dataset)
encoding = cls.encoding(dataset, encoded_dframe)
cls.__batch_save(encoded_dframe, encoding)
dataset.clear_summary_stats()
@classmethod
def save(cls, dframe, dataset):
"""Save data in `dframe` with the `dataset`.
Encode `dframe` for MongoDB, and add fields to identify it with the
passed in `dataset`. All column names in `dframe` are converted to
slugs using the dataset's schema. The dataset is updated to store the
size of the stored data.
:param dframe: The DataFrame to store.
:param dataset: The dataset to store the dframe in.
"""
# Build schema for the dataset after having read it from file.
if not dataset.schema:
dataset.build_schema(dframe)
# Update stats, before inplace encoding.
dataset.update_stats(dframe)
encoded_dframe = encode(dframe, dataset)
encoding = cls.encoding(dataset, encoded_dframe)
cls.__batch_save(encoded_dframe, encoding)
@classmethod
def update(cls, dataset, index, record):
"""Update a dataset row by index.
The record dictionary will update, not replace, the data in the row at
index.
:param dataset: The dataset to update a row for.
:param index: The index of the row to update.
:param record: The dictionary to update the row with.
"""
previous_record = cls.find_one(dataset, index).record
previous_record.pop(MONGO_ID)
record = combine_dicts(previous_record, record)
record = update_calculations(record, dataset)
record = cls.encode(record, dataset=dataset)
cls.delete(dataset, index)
super(cls, cls()).save(record)
@classmethod
def batch_read_dframe_from_cursor(cls, dataset, observations, distinct,
limit):
"""Read a DataFrame from a MongoDB Cursor in batches."""
dframes = []
batch = 0
decoding = cls.decoding(dataset)
while True:
start = batch * cls.DB_READ_BATCH_SIZE
end = start + cls.DB_READ_BATCH_SIZE
if limit > 0 and end > limit:
end = limit
# if there is a limit this may occur, and we are done
if start >= end:
break
current_observations = [
replace_keys(ob, decoding) for ob in observations[start:end]]
# if the batches exhausted the data
if not len(current_observations):
break
dframes.append(DataFrame(current_observations))
if not distinct:
observations.rewind()
batch += 1
return concat(dframes) if len(dframes) else DataFrame()
@classmethod
def __batch_save(cls, dframe, encoding):
"""Save records in batches to avoid document size maximum setting.
:param dframe: A DataFrame to save in the current model.
"""
def command(records, encoding):
cls.collection.insert(records)
batch_size = cls.DB_SAVE_BATCH_SIZE
cls.__batch_command_wrapper(command, dframe, encoding, batch_size)
@classmethod
def __batch_update(cls, dframe, encoding):
"""Update records in batches to avoid document size maximum setting.
DataFrame must have column with record (object) ids.
:param dframe: The DataFrame to update.
"""
def command(records, encoding):
# Encode the reserved key to access the row ID.
mongo_id_key = encoding.get(MONGO_ID_ENCODED, MONGO_ID_ENCODED)
# MongoDB has no batch updates.
for record in records:
spec = {MONGO_ID: record[mongo_id_key]}
del record[mongo_id_key]
doc = {'$set': record}
cls.collection.update(spec, doc)
cls.__batch_command_wrapper(command, dframe, encoding,
cls.DB_SAVE_BATCH_SIZE)
@classmethod
def __batch_command_wrapper(cls, command, df, encoding, batch_size):
try:
cls.__batch_command(command, df, encoding, batch_size)
except AutoReconnect:
batch_size /= 2
# If the batch size drops below MIN_BATCH_SIZE, assume the records are
# too large or there is another error, and fail.
if batch_size >= cls.MIN_BATCH_SIZE:
cls.__batch_command_wrapper(command, df, encoding, batch_size)
@classmethod
def __batch_command(cls, command, dframe, encoding, batch_size):
batches = int(ceil(float(len(dframe)) / batch_size))
for batch in xrange(0, batches):
start = batch * batch_size
end = start + batch_size
current_dframe = dframe[start:end]
records = cls.__encode_records(current_dframe, encoding)
command(records, encoding)
@classmethod
def __encode_records(cls, dframe, encoding):
return [cls.__encode_record(row.to_dict(), encoding)
for (_, row) in dframe.iterrows()]
@classmethod
def __encode_record(cls, row, encoding):
encoded = replace_keys(row, encoding)
encoded[cls.DELETED_AT] = 0
return encoded
@classmethod
def __make_encoding(cls, dframe, start=0):
# Ensure that DATASET_ID is first so that we can guarantee an index.
columns = [DATASET_ID] + sorted(dframe.columns - [DATASET_ID])
return {v: str(start + i) for (i, v) in enumerate(columns)}
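# For example (illustrative only): a dframe with columns
# [DATASET_ID, 'amount', 'rating'] and start=0 yields
# {DATASET_ID: '0', 'amount': '1', 'rating': '2'}; DATASET_ID is forced to the
# front so its encoded key is stable and can be indexed.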
@classmethod
def __soft_delete(cls, query):
cls.collection.update(query,
{'$set': {cls.DELETED_AT: now().isoformat()}})
@classmethod
def __store_encoding(cls, dataset, encoding):
"""Store encoded columns with dataset.
:param dataset: The dataset to store the encoding with.
:param encoding: The encoding for dataset.
"""
record = {cls.ENCODING_DATASET_ID: dataset.dataset_id,
cls.ENCODING: encoding}
super(cls, cls()).delete({cls.ENCODING_DATASET_ID: dataset.dataset_id})
super(cls, cls()).save(record)
| bsd-3-clause |
0x0all/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
rishikksh20/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
AchimTuran/sfs-python | examples/modal_room_acoustics.py | 1 | 1362 | """
This example illustrates the use of the modal room model.
"""
import numpy as np
import matplotlib.pyplot as plt
import sfs
x0 = [1, 3, 1.80] # source position
L = [6, 6, 3] # dimensions of room
deltan = 0.01 # absorption factor of walls
n0 = [1, 0, 0] # normal vector of source (only for compatibility)
N = 20 # maximum order of modes
#N = [1, 0, 0] # room mode to compute
fresponse = True # frequency response or sound field?
# compute and plot frequency response at one point
if fresponse:
f = np.linspace(20, 200, 180) # frequency
omega = 2 * np.pi * f # angular frequency
grid = sfs.util.xyz_grid(1, 1, 1.80, spacing=1)
p = []
for om in omega:
p.append(sfs.mono.source.point_modal(om, x0, n0, grid, L,
N=N, deltan=deltan))
p = np.asarray(p)
plt.plot(f, 20*np.log10(np.abs(p)))
plt.xlabel('frequency / Hz')
plt.ylabel('level / dB')
plt.grid()
# compute and plot sound field for one frequency
if not fresponse:
f = 500 # frequency
omega = 2 * np.pi * f # angular frequency
grid = sfs.util.xyz_grid([0, L[0]], [0, L[1]], L[2], spacing=.1)
p = sfs.mono.source.point_modal(omega, x0, n0, grid, L, N=N, deltan=deltan)
sfs.plot.soundfield(p, grid, xnorm=[2, 3, 0], colorbar=False,
vmax=2, vmin=-2)
| mit |
craigcitro/pydatalab | legacy_tests/kernel/utils_tests.py | 4 | 10815 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import range
import datetime as dt
import collections
import mock
import pandas
import unittest
import google.auth
# import IPython so we can mock the parts we need to here.
import IPython
import IPython.core.magic
IPython.core.magic.register_line_cell_magic = mock.Mock()
IPython.core.magic.register_line_magic = mock.Mock()
IPython.core.magic.register_cell_magic = mock.Mock()
IPython.get_ipython = mock.Mock()
import datalab.bigquery # noqa: E402
import datalab.context # noqa: E402
import datalab.utils.commands # noqa: E402
class TestCases(unittest.TestCase):
@staticmethod
def _get_expected_cols():
cols = [
{'type': 'number', 'id': 'Column1', 'label': 'Column1'},
{'type': 'number', 'id': 'Column2', 'label': 'Column2'},
{'type': 'string', 'id': 'Column3', 'label': 'Column3'},
{'type': 'boolean', 'id': 'Column4', 'label': 'Column4'},
{'type': 'number', 'id': 'Column5', 'label': 'Column5'},
{'type': 'datetime', 'id': 'Column6', 'label': 'Column6'}
]
return cols
@staticmethod
def _timestamp(d):
return (d - dt.datetime(1970, 1, 1)).total_seconds()
@staticmethod
def _get_raw_rows():
rows = [
{'f': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': 'true'}, {'v': 0.0},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 1))}
]},
{'f': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': 'false'}, {'v': 0.2},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 2))}
]},
{'f': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': 'true'}, {'v': 0.3},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 3))}
]},
{'f': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': 'false'}, {'v': 0.4},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 4))}
]},
{'f': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': 'true'}, {'v': 0.5},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 5))}
]},
{'f': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': 'true'}, {'v': 0.6},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 6))}
]}
]
return rows
@staticmethod
def _get_expected_rows():
rows = [
{'c': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': True}, {'v': 0.0}, {'v': dt.datetime(2000, 1, 1)}
]},
{'c': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': False}, {'v': 0.2}, {'v': dt.datetime(2000, 1, 2)}
]},
{'c': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': True}, {'v': 0.3}, {'v': dt.datetime(2000, 1, 3)}
]},
{'c': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': False}, {'v': 0.4}, {'v': dt.datetime(2000, 1, 4)}
]},
{'c': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': True}, {'v': 0.5}, {'v': dt.datetime(2000, 1, 5)}
]},
{'c': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': True}, {'v': 0.6}, {'v': dt.datetime(2000, 1, 6)}
]}
]
return rows
@staticmethod
def _get_test_data_as_list_of_dicts():
test_data = [
{'Column1': 1, 'Column2': 2, 'Column3': '3',
'Column4': True, 'Column5': 0.0, 'Column6': dt.datetime(2000, 1, 1)},
{'Column1': 11, 'Column2': 12, 'Column3': '13',
'Column4': False, 'Column5': 0.2, 'Column6': dt.datetime(2000, 1, 2)},
{'Column1': 21, 'Column2': 22, 'Column3': '23',
'Column4': True, 'Column5': 0.3, 'Column6': dt.datetime(2000, 1, 3)},
{'Column1': 31, 'Column2': 32, 'Column3': '33',
'Column4': False, 'Column5': 0.4, 'Column6': dt.datetime(2000, 1, 4)},
{'Column1': 41, 'Column2': 42, 'Column3': '43',
'Column4': True, 'Column5': 0.5, 'Column6': dt.datetime(2000, 1, 5)},
{'Column1': 51, 'Column2': 52, 'Column3': '53',
'Column4': True, 'Column5': 0.6, 'Column6': dt.datetime(2000, 1, 6)}
]
# Use OrderedDicts to make testing the result easier.
for i in range(0, len(test_data)):
test_data[i] = collections.OrderedDict(sorted(list(test_data[i].items()), key=lambda t: t[0]))
return test_data
def test_get_data_from_list_of_dicts(self):
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_list_of_dicts)
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6, datalab.utils.commands._utils.get_data)
def test_get_data_from_list_of_lists(self):
test_data = [
[1, 2, '3', True, 0.0, dt.datetime(2000, 1, 1)],
[11, 12, '13', False, 0.2, dt.datetime(2000, 1, 2)],
[21, 22, '23', True, 0.3, dt.datetime(2000, 1, 3)],
[31, 32, '33', False, 0.4, dt.datetime(2000, 1, 4)],
[41, 42, '43', True, 0.5, dt.datetime(2000, 1, 5)],
[51, 52, '53', True, 0.6, dt.datetime(2000, 1, 6)],
]
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils._get_data_from_list_of_lists)
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils.get_data)
def test_get_data_from_dataframe(self):
df = pandas.DataFrame(self._get_test_data_as_list_of_dicts())
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_dataframe)
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._table.Table.exists')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.context._context.Context.default')
def test_get_data_from_table(self, mock_context_default, mock_api_tables_get,
mock_table_exists, mock_api_tabledata_list):
data = TestCases._get_expected_rows()
mock_context_default.return_value = TestCases._create_context()
mock_api_tables_get.return_value = {
'numRows': len(data),
'schema': {
'fields': [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'INTEGER'},
{'name': 'Column3', 'type': 'STRING'},
{'name': 'Column4', 'type': 'BOOLEAN'},
{'name': 'Column5', 'type': 'FLOAT'},
{'name': 'Column6', 'type': 'TIMESTAMP'}
]
}
}
mock_table_exists.return_value = True
raw_data = self._get_raw_rows()
def tabledata_list(*args, **kwargs):
start_index = kwargs['start_index']
max_results = kwargs['max_results']
if max_results < 0:
max_results = len(data)
return {'rows': raw_data[start_index:start_index + max_results]}
mock_api_tabledata_list.side_effect = tabledata_list
t = datalab.bigquery.Table('foo.bar')
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_table)
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
def test_get_data_from_empty_list(self):
self._test_get_data([], [], [], 0, datalab.utils.commands._utils.get_data)
def test_get_data_from_malformed_list(self):
with self.assertRaises(Exception) as error:
self._test_get_data(['foo', 'bar'], [], [], 0, datalab.utils.commands._utils.get_data)
self.assertEquals('To get tabular data from a list it must contain dictionaries or lists.',
str(error.exception))
def _test_get_data(self, test_data, cols, rows, expected_count, fn):
self.maxDiff = None
data, count = fn(test_data)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows}, data)
# Test first_row. Note that count must be set in this case so we use a value greater than the
# data set size.
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=10)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:]}, data)
# Test first_row + count
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=2)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:first + 2]}, data)
# Test subsets of columns
# No columns
data, count = fn(test_data, fields=[])
self.assertEquals({'cols': [], 'rows': [{'c': []}] * expected_count}, data)
# Single column
data, count = fn(test_data, fields=['Column3'])
if expected_count == 0:
return
self.assertEquals({'cols': [cols[2]],
'rows': [{'c': [row['c'][2]]} for row in rows]}, data)
# Multi-columns
data, count = fn(test_data, fields=['Column1', 'Column3', 'Column6'])
self.assertEquals({'cols': [cols[0], cols[2], cols[5]],
'rows': [{'c': [row['c'][0], row['c'][2], row['c'][5]]} for row in rows]},
data)
# Switch order
data, count = fn(test_data, fields=['Column3', 'Column1'])
self.assertEquals({'cols': [cols[2], cols[0]],
'rows': [{'c': [row['c'][2], row['c'][0]]} for row in rows]}, data)
# Select all
data, count = fn(test_data,
fields=['Column1', 'Column2', 'Column3', 'Column4', 'Column5', 'Column6'])
self.assertEquals({'cols': cols, 'rows': rows}, data)
@staticmethod
def _create_api():
context = TestCases._create_context()
return datalab.bigquery._api.Api(context.credentials, context.project_id)
@staticmethod
def _create_context():
project_id = 'test'
creds = mock.Mock(spec=google.auth.credentials.Credentials)
return datalab.context.Context(project_id, creds)
| apache-2.0 |
winklerand/pandas | pandas/io/msgpack/__init__.py | 26 | 1233 | # coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
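# Illustrative only: an ExtType pairs an application-defined type code with a
# raw byte payload, e.g. ExtType(code=5, data=b'\x00\x01'), and is what the
# unpacker hands back for extension types it does not know how to decode.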
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
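# Minimal usage sketch (illustrative, not part of the module; the exact
# str/bytes round-trip behaviour depends on the encoding options):
# payload = packb({'a': 1, 'b': [1, 2, 3]})
# obj = unpackb(payload)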
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
andrewnc/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
nelson-liu/scikit-learn | benchmarks/bench_isolation_forest.py | 46 | 3782 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = X.shape
n_samples_train = n_samples // 2
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = - model.decision_function(X_test) # the lower, the more normal
# Show score histograms
fig, ax = plt.subplots(3, sharex=True, sharey=True)
bins = np.linspace(-0.5, 0.5, 200)
ax[0].hist(scoring, bins, color='black')
ax[0].set_title('decision function for %s dataset' % dat)
ax[0].legend(loc="lower right")
ax[1].hist(scoring[y_test == 0], bins, color='b',
label='normal data')
ax[1].legend(loc="lower right")
ax[2].hist(scoring[y_test == 1], bins, color='r',
label='outliers')
ax[2].legend(loc="lower right")
# Show ROC Curves
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
label = ('%s (area: %0.3f, train-time: %0.2fs, '
'test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
ax_roc.plot(fpr, tpr, lw=1, label=label)
ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel('False Positive Rate')
ax_roc.set_ylabel('True Positive Rate')
ax_roc.set_title('Receiver operating characteristic (ROC) curves')
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
| bsd-3-clause |
cdfassnacht/CodeCDF | python/gradefuncs.py | 1 | 3401 | """
Functions that are useful in plotting a grade histogram
"""
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import ascii
#---------------------------------------------------------------------------
def read_table(infile, colname):
"""
    This code can be used to read both the old-school text files (if they
    have the two-column format - see the help for the read_text function) and,
    more importantly, the information directly from a CSV table
    of the form that is exported from smartsite or canvas.
Inputs:
infile - input file name
colname - the name of the column containing the score of interest.
NOTE: for the old-school text files, this will be 'col2'
while for the CSV files it could be something like
'Midterm 2 (32620)' or 'MT2' or 'Final Score'
"""
""" Read in the table """
try:
tab = ascii.read(infile, guess=False, format='csv')
except:
tab = ascii.read(infile)
print(tab.colnames)
""" Get the relevant information """
try:
tot = tab[colname].copy()
except KeyError:
print('')
print('Could not find a column matching %s in %s' % (colname,infile))
tot = None
return tot
#---------------------------------------------------------------------------
def read_text(infile, nscorecols=1):
"""
Function to read in the scores from the old-school text files that were
created by modifying the csv files that came from downloading the gradebook
from smartsite or canvas.
There are two expected input formats:
Two-column, designated by setting nscorecols=1
Name total_score
Three-column, designated by setting nscorecols=2
Name multiple_choice_score short_answer_score
The old code used the numpy loadtxt function to load the data, but this
new code uses the astropy.io.ascii read function
"""
""" Read the data into an astropy Table structure """
tab = ascii.read(infile)
""" Generate the total score array """
if nscorecols == 1:
tot = tab['col2'].copy()
else:
tot = tab['col2'] + tab['col3']
return tot
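# Example of the text layouts read_text expects (names and scores are made up
# for illustration only):
#
#   two-column   (nscorecols=1):   Jane   87
#   three-column (nscorecols=2):   Jane   42   45
#
# The files are headerless and whitespace-separated, so astropy's ascii.read
# labels the columns col1, col2, col3 automatically, matching the use of
# 'col2'/'col3' above.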
#---------------------------------------------------------------------------
def plot_tothist(infile, tot, maxy, binsize=3):
"""
    Plot the total-score histogram, where the total score (tot) has been
    previously calculated or read in by the input functions
"""
""" Calculate moments of the distribution """
mn = tot.mean()
med = np.median(tot)
mp = tot.mean() + tot.std()
mm = tot.mean() - tot.std()
""" Report on the properties of the distibution """
print('')
print("Statistics for %s" % infile)
print("---------------------------------")
print(" Mean: %5.1f" % mn)
print(" Median: %5.1f" % med)
print(" Sigma: %5.1f" % tot.std())
print(" Mean - 1 sig: %5.1f" % mm)
print(" Mean + 1 sig: %5.1f" % mp)
print('')
""" Plot the distribution """
binhist = range(int(tot.min())-1,int(tot.max())+3,binsize)
plt.hist(tot,binhist,histtype='step',ec='k')
plt.ylim(0,maxy)
plt.axvline(x=mn, ymin=0, ymax=maxy, c='r', lw=3)
plt.axvline(x=mm, ymin=0, ymax=maxy, c='b', lw=3)
plt.axvline(x=mp, ymin=0, ymax=maxy, c='b', lw=3)
plt.title("Distribution of scores for %s" % infile)
plt.xlabel("Scores")
plt.ylabel("N")
plt.show()
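# Hypothetical end-to-end sketch (added for illustration; the file name, column
# label, and axis limit below are placeholders, not part of the original module).
def example_grade_histogram(infile='canvas_export.csv', colname='Final Score'):
    """Read one score column from a gradebook export and plot its histogram."""
    tot = read_table(infile, colname)
    if tot is not None:
        plot_tothist(infile, tot, maxy=25, binsize=5)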
| mit |
mrcslws/htmresearch | projects/wavelet_dataAggregation/run_nupic_aggregator.py | 11 | 3532 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Run Nupic aggregator on good day bad day data
(1) Perform aggregation using nupic.data.aggregator
(2) Plot aggregated and raw data using matplotlib/plotly
"""
from nupic.data import aggregator
from nupic.data import fieldmeta
from unicorn_backend.utils import date_time_utils
import matplotlib.pyplot as plt
import plotly.plotly as py
plt.ion()
plt.close('all')
def initializeAggregator(aggSpec, modelSpec):
inputRecordSchema = (
fieldmeta.FieldMetaInfo(modelSpec["timestampFieldName"],
fieldmeta.FieldMetaType.datetime,
fieldmeta.FieldMetaSpecial.timestamp),
fieldmeta.FieldMetaInfo(modelSpec["valueFieldName"],
fieldmeta.FieldMetaType.float,
fieldmeta.FieldMetaSpecial.none),
)
dataAggregator = aggregator.Aggregator(
aggregationInfo=dict(
fields=([(modelSpec["valueFieldName"], aggSpec["func"])]
if aggSpec is not None else []),
seconds=aggSpec["windowSize"] if aggSpec is not None else 0
),
inputFields=inputRecordSchema)
return dataAggregator
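# Hypothetical sketch (not part of the original script): building an aggregator
# with a different window. The 600-second (10 minute) window below is an
# arbitrary example value.
def initializeExampleAggregator():
    """Return an Aggregator that averages the 'value' field over 600 s bins."""
    exampleAggSpec = {"func": "mean", "windowSize": 600}
    exampleModelSpec = {"timestampFieldName": "timestamp",
                        "valueFieldName": "value"}
    return initializeAggregator(exampleAggSpec, exampleModelSpec)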
if __name__ == "__main__":
inputFile = open('example_data/JAO_Apple_Heart Rate_raw_20160404.csv')
# skip header lines
inputFile.readline()
aggSpec = {"func": "mean",
"windowSize": 3000}
modelSpec = {"timestampFieldName": "timestamp",
"valueFieldName": "value"}
dataAggregator = initializeAggregator(aggSpec, modelSpec)
timeStampRaw = []
timeStampAgg = []
valueRaw = []
valueAgg = []
sliceEndTime = []
for inputRow in inputFile.readlines():
inputRow = inputRow.split(',')
fields = [
date_time_utils.parseDatetime(inputRow[0],
'%m/%d/%y %H:%M'),
float(inputRow[1])
]
aggRow, _ = dataAggregator.next(fields, None)
timeStampRaw.append(fields[0])
valueRaw.append(fields[1])
if aggRow is not None:
sliceEndTime.append(dataAggregator._endTime)
timeStampAgg.append(aggRow[0])
valueAgg.append(aggRow[1])
fig = plt.figure()
plt.plot(timeStampRaw, valueRaw, '.')
plt.plot(timeStampAgg, valueAgg, 'r+')
yl = plt.ylim()
# for timestamp in sliceEndTime:
# plt.vlines(timestamp, yl[0], yl[1])
plt.legend(['Raw', 'Aggregate'])
plt.xlabel('Timestamp')
plt.ylabel('Value')
plt.xlim([timeStampRaw[100], timeStampRaw[300]])
# plot_url = py.plot_mpl(fig, filename='GDBD_HeartRate_VisualizeAggregation',
# fileopt='overwrite', sharing='private')
| agpl-3.0 |
farthir/msc-project | snippets/plot_all.py | 1 | 1515 | import sys
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.cm as cm
def main():
input_filename = sys.argv[1]
df = pd.read_csv('data/%s.csv' % input_filename).round(10)
df.columns = ['property', 'horizontal', 'vertical', 'force']
#max_force = math.ceil(df.ix[:,3].max())
font = {'family' : 'DejaVu Sans',
'weight' : 'normal',
'size' : 14}
rcParams.update({'figure.autolayout': True})
properties = np.unique(df["property"])
plt.figure()
plt.rc('font', **font)
plt.xlabel('Vertical Displacement (mm)')
plt.ylabel('Force (kN)')
for prop in properties:
label = '$e/l = %s$' % prop
plt_df = df[df['property'] == prop]
if prop == 0:
prop = 0.9
plt.scatter(plt_df['vertical'], plt_df['force'], label=label, s=10, c=cm.Set1(prop))
legend = plt.legend()
plt.savefig('data/%s_v.pdf' % input_filename)
plt.show()
plt.figure()
plt.rc('font', **font)
plt.xlabel('Horizontal Displacement (mm)')
plt.ylabel('Force (kN)')
for prop in properties:
label = '$e/l = %s$' % prop
plt_df = df[df['property'] == prop]
if prop == 0:
prop = 0.9
plt.scatter(plt_df['horizontal'], plt_df['force'], label=label, s=10, c=cm.Set1(prop))
legend = plt.legend()
plt.savefig('data/%s_h.pdf' % input_filename)
plt.show()
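# Hypothetical invocation sketch (the file name is a placeholder): running
#
#   python plot_all.py pile_tests
#
# expects data/pile_tests.csv with four columns (property, horizontal
# displacement, vertical displacement, force) and writes data/pile_tests_v.pdf
# and data/pile_tests_h.pdf alongside it.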
if __name__ == "__main__":
main()
| mit |
ZwickyTransientFacility/ztf_sim | ztf_sim/QueueManager.py | 1 | 55792 | """Queue classes."""
import os
from collections import defaultdict
from datetime import datetime
import logging
import numpy as np
import pandas as pd
import astropy.coordinates as coord
import astropy.units as u
from astropy.time import Time, TimeDelta
import astroplan
from .Fields import Fields
from .optimize import tsp_optimize, night_optimize
from .cadence import enough_gap_since_last_obs
from .constants import P48_loc, PROGRAM_IDS, FILTER_IDS, TIME_BLOCK_SIZE
from .constants import EXPOSURE_TIME, READOUT_TIME, FILTER_CHANGE_TIME, slew_time
from .constants import PROGRAM_BLOCK_SEQUENCE, LEN_BLOCK_SEQUENCE, MAX_AIRMASS
from .constants import BASE_DIR
from .utils import approx_hours_of_darkness
from .utils import skycoord_to_altaz, seeing_at_pointing
from .utils import altitude_to_airmass, airmass_to_altitude, RA_to_HA, HA_to_RA
from .utils import scalar_len, nightly_blocks, block_index, block_index_to_time
from .utils import block_use_fraction, maximum_altitude, compute_limiting_mag
class QueueEmptyError(Exception):
"""Error class for when the nightly queue has no more fields"""
pass
class QueueManager(object):
def __init__(self, queue_name, queue_configuration, rp=None, fields=None):
self.logger = logging.getLogger(__name__)
# queue name (useful in Scheduler object when swapping queues)
self.queue_name = queue_name
# list of ObservingPrograms
self.observing_programs = queue_configuration.build_observing_programs()
# defaults to handle time-windowed queues
self.is_TOO = False
self.validity_window = None
# Hack for greedy queues
self.requests_in_window = True
if 'validity_window_mjd' in queue_configuration.config:
window = queue_configuration.config['validity_window_mjd']
if window is not None:
assert(len(window) == 2)
self.set_validity_window_mjd(window[0], window[1])
else:
self.validity_window = None
else:
self.validity_window = None
# flag to check if assign_nightly_requests has been called tonight
self.queue_night = None
# block on which the queue parameters were calculated
self.queue_slot = None
# number allowed requests by subprogram tonight
# (dict of (program_id, subprogram_name))
self.requests_allowed = {}
# the queue itself
self.queue = pd.DataFrame()
# should we only consider fields from one program in a given
# observing block?
# CURRENTLY NOT IMPLEMENTED.
self.block_programs = False
if rp is None:
# initialize an empty RequestPool
self.rp = RequestPool()
else:
self.rp = rp
if fields is None:
self.fields = Fields()
else:
self.fields = fields
self.missed_obs_queue = None
def is_valid(self, time):
if self.validity_window is None:
return True
window_start = self.validity_window[0]
window_stop = self.validity_window[1]
return window_start <= time <= window_stop
def validity_window_mjd(self):
if self.validity_window is None:
return None
return [self.validity_window[0].mjd, self.validity_window[1].mjd]
def set_validity_window_mjd(self, window_start, window_stop):
"""Set the time at which this queue can run.
Parameters
----------
window_start : `float`
Modified Julian Date start time
window_stop : `float`
Modified Julian Date end time
"""
if window_start >= window_stop:
raise ValueError("validity window start time must be less than end time")
# rough sanity checks
if window_start <= Time('2017-01-01').mjd:
raise ValueError(f"MJD likely out of range: {window_start}")
if window_stop >= Time('2030-01-01').mjd:
raise ValueError(f"MJD likely out of range: {window_stop}")
self.validity_window = [Time(window_start,format='mjd'),
Time(window_stop,format='mjd')]
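    # Hypothetical usage sketch (the MJD values are placeholders):
    #
    #   q.set_validity_window_mjd(58900.25, 58900.35)
    #   q.is_valid(Time(58900.30, format='mjd'))   # -> True
    #
    # is_valid() simply checks whether the supplied astropy Time falls inside
    # the stored window.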
def compute_block_use(self):
"""Returns a dictionary with the fraction of blocks used by the queue,
assuming observing starts at the beginning of the validity window"""
if self.validity_window is None:
raise ValueError('All blocks are valid')
start_block = block_index(self.validity_window[0])
obs_start_time = Time(self.validity_window[0],format='mjd')
# greedy queues have no len until they have assignments made, so
# just use the validity window
if len(self.queue) == 0:
stop_block = block_index(self.validity_window[1])
obs_end_time = self.validity_window[1]
else:
# with no weather, we start at the start of the window
if 'n_repeats' in self.queue.columns:
n_obs = np.sum(self.queue.n_repeats)
exp_time = np.sum(self.queue.exposure_time * self.queue.n_repeats)
else:
n_obs = len(self.queue)
exp_time = np.sum(self.queue.exposure_time)
obs_time = (exp_time * u.second) + n_obs * READOUT_TIME
obs_end_time = self.validity_window[0] + obs_time
stop_block = block_index(obs_end_time)
# below breaks if the window is longer than the observations
#stop_block = block_index(self.validity_window[1])
assert obs_end_time > obs_start_time
# compute fraction of the blocks used by the queue
block_use = defaultdict(float)
for block in np.arange(start_block, stop_block+1):
block_use[block] = block_use_fraction(block, obs_start_time,
obs_end_time)
return block_use
def add_observing_program(self, observing_program):
self.observing_programs.append(observing_program)
def assign_nightly_requests(self, current_state, obs_log,
time_limit = 30 * u.second, block_use = defaultdict(float),
timed_obs_count = defaultdict(int)):
# clear previous request pool
if self.queue_name != 'missed_obs':
self.rp.clear_all_request_sets()
# set number of allowed requests by program.
self.determine_allowed_requests(current_state['current_time'],
obs_log, timed_obs_count = timed_obs_count)
# can be used by field_selection_functions downstream
program_fields = {}
for program in self.observing_programs:
key = (program.program_id, program.subprogram_name)
program_fields[key] = \
{'field_ids': program.field_ids,
'field_selection_function': program.field_selection_function,
'requests_allowed': self.requests_allowed[key]}
for program in self.observing_programs:
request_sets = program.assign_nightly_requests(
current_state['current_time'], self.fields,
obs_log, program_fields, block_programs=self.block_programs)
for rs in request_sets:
self.rp.add_request_sets(rs['program_id'],
rs['subprogram_name'], rs['program_pi'],
rs['field_ids'], rs['filter_ids'],
rs['intranight_gap'],
rs['exposure_time'],
rs['total_requests_tonight'])
# assert(len(self.rp.pool) > 0)
        # any specific tasks needed
self._assign_nightly_requests(current_state,
time_limit = time_limit, block_use = block_use)
# mark that we've set up the pool for tonight
self.queue_night = np.floor(current_state['current_time'].mjd)
def adjust_program_exposures_tonight(self, obs_log, mjd_start, mjd_stop):
"""Use past history to adjust the number of exposures per program tonight.
Counts exposures from the start of the month and equalizes any excess
over NIGHTS_TO_REDISTRIBUTE or the number of nights to the end of
the month, whichever is less."""
obs_count_by_program = obs_log.count_equivalent_obs_by_program(
mjd_range = [mjd_start, mjd_stop])
# drop engineering/commissioning
obs_count_by_program = obs_count_by_program[
obs_count_by_program['program_id'] != 0]
obs_count_by_program.set_index('program_id', inplace=True)
# if there are no observations, add zeros
for program_id in PROGRAM_IDS:
if program_id != 0:
if program_id not in obs_count_by_program.index:
obs_count_by_program.loc[program_id] = 0
total_obs = np.sum(obs_count_by_program['n_obs'])
# infer the program fractions from the subprograms
target_program_fractions = {propid:0 for propid in PROGRAM_IDS
if propid != 0}
for op in self.observing_programs:
target_program_fractions[op.program_id] = \
op.program_observing_time_fraction
target_program_fractions = pd.Series(target_program_fractions)
target_program_fractions.index.name = 'program_id'
target_program_fractions.name = 'target_fraction'
target_program_nobs = target_program_fractions * total_obs
target_program_nobs.name = 'target_program_nobs'
# note that this gives 0 in case of no observations, as desired
        # have to do the subtraction backwards because of Series/DataFrame
# API nonsense
delta_program_nobs = \
-1*obs_count_by_program.subtract(target_program_nobs,
axis=0)
NIGHTS_TO_REDISTRIBUTE = 5
time = Time(mjd_stop,format='mjd')
dtnow = time.to_datetime()
if dtnow.month != 12:
next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1),
scale='utc').mjd
else:
next_month_start_mjd = Time(datetime(dtnow.year+1,1,1),
scale='utc').mjd
nights_left_this_month = np.round(next_month_start_mjd - time.mjd)
if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE:
divisor = NIGHTS_TO_REDISTRIBUTE
else:
divisor = nights_left_this_month
if divisor == 0:
divisor = 1
delta_program_nobs /= divisor
delta_program_nobs = np.round(delta_program_nobs).astype(int)
return delta_program_nobs
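    # Worked example (illustrative numbers only): if a program is 50 equivalent
    # exposures over its month-to-date target and more than 5 nights remain in
    # the month, the adjustment returned for that program is -50/5 = -10
    # exposures tonight; a deficit is paid back the same way with positive sign.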
def adjust_subprogram_exposures_tonight(self, obs_log, mjd_start, mjd_stop):
"""Use past history to adjust the number of exposures per subprogram tonight.
Counts exposures from the start of the month and equalizes any excess
over NIGHTS_TO_REDISTRIBUTE or the number of nights to the end of
the month, whichever is less."""
obs_count_by_subprogram_all = obs_log.count_equivalent_obs_by_subprogram(
mjd_range = [mjd_start, mjd_stop])
# drop engineering/commissioning
obs_count_by_subprogram_all = obs_count_by_subprogram_all[
obs_count_by_subprogram_all['program_id'] != 0]
obs_count_by_subprogram_all.set_index(['program_id','subprogram_name'],
inplace=True)
# only count the subprograms that are currently active. This is
# going to cause problems when the programs change--but we are going to
# only use the subprogram balance for i-band
obs_count_by_current_subprogram_dict = {}
# if there are no observations, add zeros
for op in self.observing_programs:
idx = (op.program_id, op.subprogram_name)
if idx not in obs_count_by_subprogram_all.index:
obs_count_by_current_subprogram_dict[idx] = 0
else:
obs_count_by_current_subprogram_dict[idx] = obs_count_by_subprogram_all.loc[idx,'n_obs']
obs_count_by_subprogram = pd.Series(obs_count_by_current_subprogram_dict)
obs_count_by_subprogram.name = 'n_obs'
obs_count_by_subprogram.index.set_names(
['program_id','subprogram_name'], inplace=True)
total_obs = obs_count_by_subprogram.sum()
# record the subprogram fractions
target_subprogram_fractions = defaultdict(float)
for op in self.observing_programs:
target_subprogram_fractions[(op.program_id, op.subprogram_name)] = \
op.program_observing_time_fraction * op.subprogram_fraction
target_subprogram_fractions = pd.Series(target_subprogram_fractions)
# target_program_fractions.index.name = 'program_id'
target_subprogram_fractions.name = 'target_fraction'
target_subprogram_nobs = target_subprogram_fractions * total_obs
target_subprogram_nobs.name = 'target_subprogram_nobs'
target_subprogram_nobs.index.set_names(
['program_id','subprogram_name'], inplace=True)
# note that this gives 0 in case of no observations, as desired
        # have to do the subtraction backwards because of Series/DataFrame
# API nonsense
delta_subprogram_nobs = \
-1*obs_count_by_subprogram.subtract(target_subprogram_nobs,
axis=0).fillna(0)
NIGHTS_TO_REDISTRIBUTE = 5
time = Time(mjd_stop,format='mjd')
dtnow = time.to_datetime()
if dtnow.month != 12:
next_month_start_mjd = Time(datetime(dtnow.year,dtnow.month+1,1),
scale='utc').mjd
else:
next_month_start_mjd = Time(datetime(dtnow.year+1,1,1),
scale='utc').mjd
nights_left_this_month = np.round(next_month_start_mjd - time.mjd)
if nights_left_this_month > NIGHTS_TO_REDISTRIBUTE:
divisor = NIGHTS_TO_REDISTRIBUTE
else:
divisor = nights_left_this_month
if divisor == 0:
divisor = 1
delta_subprogram_nobs /= divisor
delta_subprogram_nobs = np.round(delta_subprogram_nobs).astype(int)
return delta_subprogram_nobs
def determine_allowed_requests(self, time, obs_log,
timed_obs_count = defaultdict(int)):
"""Use count of past observations and expected observing time fractions
to determine number of allowed requests tonight.
Exclude observations already planned in timed queues."""
self.requests_allowed = {}
# rather than using equivalent obs, might be easier to work in
# exposure time directly?
# enforce program balance on a monthly basis
dtnow = time.to_datetime()
month_start_mjd = Time(datetime(dtnow.year,dtnow.month,1),
scale='utc').mjd
delta_program_exposures_tonight = self.adjust_program_exposures_tonight(
obs_log, month_start_mjd, time.mjd)
# use this for i-band only
delta_subprogram_exposures_tonight = self.adjust_subprogram_exposures_tonight(
obs_log, month_start_mjd, time.mjd)
self.logger.info(f'Change in allowed exposures: {delta_program_exposures_tonight}')
self.logger.info(f'Needed change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight}')
self.logger.debug(f"Sum of change in allowed exposures by subprogram: {delta_subprogram_exposures_tonight.reset_index().groupby('program_id').agg(np.sum)}")
self.logger.info(f'Number of timed observations: {timed_obs_count}')
dark_time = approx_hours_of_darkness(time)
# calculate subprogram fractions excluding list queues and TOOs
scheduled_subprogram_sum = defaultdict(float)
for op in self.observing_programs:
# list queues and TOOs should set field_ids = [], but not None
# OPs scheduled using field_selection_function will have
# field_ids = None
if op.field_ids is not None:
if len(op.field_ids) == 0:
continue
scheduled_subprogram_sum[op.program_id] += \
op.subprogram_fraction
for op in self.observing_programs:
program_time_tonight = (
dark_time * op.program_observing_time_fraction +
(delta_program_exposures_tonight.loc[op.program_id,'n_obs']
- timed_obs_count[op.program_id]) * (EXPOSURE_TIME+READOUT_TIME))
subprogram_time_tonight = (
program_time_tonight * op.subprogram_fraction /
scheduled_subprogram_sum[op.program_id])
n_requests = (subprogram_time_tonight.to(u.min) /
op.time_per_exposure().to(u.min)).value[0]
            n_requests = np.round(n_requests).astype(int)
# i_band program balance needs individual tuning due to
# longer cadence and filter blocking
if op.subprogram_name == 'i_band':
delta_i_nexp = delta_subprogram_exposures_tonight.loc[(2,'i_band')]
if delta_i_nexp > 0:
self.logger.info(f'Adding {delta_i_nexp} additional i-band exposures')
n_requests += delta_i_nexp
else:
self.logger.info(f'Implied change in i-band exposures is negative, skipping supplementation: {delta_i_nexp}')
self.requests_allowed[(op.program_id,
op.subprogram_name)] = n_requests
for key, n_requests in self.requests_allowed.items():
if n_requests < 0:
self.requests_allowed[key] = 0
self.logger.info(self.requests_allowed)
def next_obs(self, current_state, obs_log):
"""Given current state, return the parameters for the next request"""
# don't store the telescope state locally!
# check that assign_nightly_requests has been called tonight.
if self.queue_type != 'list':
if np.floor(current_state['current_time'].mjd) != self.queue_night:
self.assign_nightly_requests(current_state, obs_log)
# define functions that actually do the work in subclasses
next_obs = self._next_obs(current_state, obs_log)
# check if we have a disallowed observation, and reject it:
if next_obs['target_limiting_mag'] < 0:
self.logger.warning(f'Target is unobservable! Removing from queue {next_obs}')
self.remove_requests(next_obs['request_id'])
next_obs = self.next_obs(current_state, obs_log)
next_obs['queue_name'] = self.queue_name
return next_obs
def update_queue(self, current_state, obs_log, **kwargs):
"""Recalculate queue"""
# define functions that actually do the work in subclasses
return self._update_queue(current_state, obs_log)
def remove_requests(self, request_id):
"""Remove a request from both the queue and the request set pool"""
# define functions that actually do the work in subclasses
return self._remove_requests(request_id)
def return_queue(self):
"""Return queue values, ordered in the expected sequence if possible"""
queue = self._return_queue()
cols = ['field_id','filter_id','exposure_time','program_id',
'subprogram_name','ra','dec','ordered']
if self.queue_type == 'gurobi':
cols.append('slot_start_time')
if self.queue_type == 'list':
cols.append('mode_num')
cols.append('ewr_num_images')
return queue.loc[:,cols]
class GurobiQueueManager(QueueManager):
def __init__(self, queue_name, queue_configuration, **kwargs):
super().__init__(queue_name, queue_configuration, **kwargs)
self.block_obs_number = 0
self.queue_type = 'gurobi'
def _assign_nightly_requests(self, current_state,
time_limit = 30.*u.second, block_use = defaultdict(float)):
self._assign_slots(current_state, time_limit = time_limit,
block_use = block_use)
def _next_obs(self, current_state, obs_log):
"""Select the highest value request."""
# do the slot assignment at the beginning of the night
# (or if the queue is empty, which should be unusual)
# if we've entered a new block, solve the TSP to sequence the requests
if (block_index(current_state['current_time'])[0] != self.queue_slot):
try:
self._move_requests_to_missed_obs(self.queue_slot)
except Exception as e:
self.logger.exception(e)
self.logger.error('Failed moving requests to missed obs!')
self._sequence_requests_in_block(current_state)
if (len(self.queue_order) == 0):
raise QueueEmptyError("Ran out of observations this block.")
idx = self.queue_order[0]
row = self.queue.loc[idx]
if self.queue_slot in self.filter_by_slot:
filter_id = int(self.filter_by_slot[self.queue_slot])
else:
raise QueueEmptyError("No requests in this slot!")
next_obs = {'target_field_id': int(row['field_id']),
'target_ra': row['ra'],
'target_dec': row['dec'],
'target_filter_id': filter_id,
'target_program_id': int(row['program_id']),
'target_subprogram_name': row['subprogram_name'],
'target_program_pi': row['program_pi'],
'target_exposure_time': row['exposure_time'] * u.second,
'target_sky_brightness':
self.block_sky_brightness.loc[idx,self.queue_slot][filter_id],
'target_limiting_mag':
self.block_lim_mags.loc[idx,self.queue_slot][filter_id],
'target_metric_value':
self.block_slot_metric.loc[idx,self.queue_slot][filter_id],
'target_total_requests_tonight': int(row['total_requests_tonight']),
'target_mode_num': 0,
'target_num_images': 1,
'request_id': idx}
# 'target_sky_brightness': self.queue.ix[idx].sky_brightness,
# 'target_limiting_mag': self.queue.ix[idx].limiting_mag,
# 'target_metric_value': self.queue.ix[idx].value,
# 'target_request_number_tonight':
return next_obs
def _slot_metric(self, limiting_mag, dec):
"""Calculate metric for assigning fields to slots.
penalizes volume for both extinction (airmass) and fwhm penalty
due to atmospheric refraction, plus sky brightness from
moon phase and distance
== 1 for 21st mag.
normalize metrics by maximum value at transit
so low-declination fields are not penalized
"""
#see 200430 notes
metric = (10.**(0.6 * (limiting_mag - 21)) /
(1-1e-4*(maximum_altitude(dec) - 90)**2.))
# lock out -99 limiting mags even more aggressively
return metric.where(limiting_mag > 0, -0.99)
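    # Worked example (illustrative numbers only): a field with
    # limiting_mag = 21 whose maximum altitude is 90 deg scores
    # 10**0 / 1 = 1, while a field one magnitude deeper scores 10**0.6 ~ 4,
    # so slots are weighted roughly by the relative survey volume probed.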
def _assign_slots(self, current_state, time_limit = 30*u.second,
block_use = defaultdict(float)):
"""Assign requests in the Pool to slots"""
# check that the pool has fields in it
if len(self.rp.pool) == 0:
raise QueueEmptyError("No fields in pool")
# join with fields so we have the information we need
# make a copy so rp.pool and self.queue are not linked
df = self.rp.pool.join(self.fields.fields, on='field_id').copy()
# calculate limiting mag by block. uses the block midpoint time
blocks, times = nightly_blocks(current_state['current_time'],
time_block_size=TIME_BLOCK_SIZE)
# remove the excluded blocks, if any. Could do this in optimize.py
        # but it makes the optimization problem unnecessarily bigger
# don't demand 100% of the block is used: tiny fractions lead to
# infeasible models
exclude_blocks = [b for (b,v) in block_use.items() if v > 0.95]
self.logger.debug(f'Excluding completely filled blocks {exclude_blocks}')
if len(exclude_blocks):
cut_blocks = np.setdiff1d(blocks, exclude_blocks)
cut_times = block_index_to_time(cut_blocks,
current_state['current_time'], where='mid')
blocks, times = cut_blocks, cut_times
lim_mags = {}
sky_brightnesses = {}
decs = {}
for bi, ti in zip(blocks, times):
if 'altitude' in df.columns:
df.drop('altitude', axis=1, inplace=True)
if 'azimuth' in df.columns:
df.drop('azimuth', axis=1, inplace=True)
# use pre-computed blocks
df_alt = self.fields.block_alt[bi]
df_alt.name = 'altitude'
df = df.join(df_alt, on='field_id')
df_az = self.fields.block_az[bi]
df_az.name = 'azimuth'
df = df.join(df_az, on='field_id')
for fid in FILTER_IDS:
df_limmag, df_sky = \
compute_limiting_mag(df, ti, self.fields.Sky,
filter_id = fid)
lim_mags[(bi, fid)] = df_limmag
sky_brightnesses[(bi, fid)] = df_sky
decs[(bi, fid)] = df.dec
# this results in a MultiIndex on the *columns*: level 0 is block,
# level 1 is filter_id. df_metric.unstack() flattens it
self.block_lim_mags = pd.DataFrame(lim_mags)
self.block_sky_brightness = pd.DataFrame(sky_brightnesses)
block_decs = pd.DataFrame(decs)
self.block_slot_metric = self._slot_metric(self.block_lim_mags,
block_decs)
# count the number of observations requested by filter
df['n_reqs_tot'] = 0
for fid in FILTER_IDS:
df['n_reqs_{}'.format(fid)] = \
df.filter_ids.apply(lambda x: np.sum([xi == fid for xi in x]))
df['n_reqs_tot'] += df['n_reqs_{}'.format(fid)]
# prepare the data for input to gurobi
#import shelve
#s = shelve.open('tmp_vars.shelf')
#s['block_lim_mags'] = self.block_lim_mags
#s['block_slot_metric'] = self.block_slot_metric
#s['df'] = df
#s.close()
self.request_sets_tonight, df_slots, dft = night_optimize(
self.block_slot_metric, df, self.requests_allowed,
time_limit = time_limit, block_use = block_use)
grp = df_slots.groupby('slot')
self.queued_requests_by_slot = grp['request_id'].apply(list)
self.filter_by_slot = \
grp['metric_filter_id'].apply(lambda x: np.unique(x)[0])
# rework to dump output
df_slots['scheduled'] = True
dft.set_index(['request_id','slot','metric_filter_id'],inplace=True)
df_slots.set_index(['request_id','slot','metric_filter_id'],inplace=True)
dft = dft.join(df_slots,how='outer')
dft['scheduled'] = dft['scheduled'].fillna(False)
dft.reset_index(inplace=True)
dft = pd.merge(dft,df[['field_id']],
left_on='request_id', right_index=True)
n_requests_scheduled = np.sum(dft['scheduled'])
total_metric_value = np.sum(dft['scheduled']*dft['metric'])
avg_metric_value = total_metric_value / n_requests_scheduled
tot_avail_requests_bysubprogram = \
df.groupby(['program_id','subprogram_name'])['n_reqs_tot'].agg(np.sum)
tot_avail_requests_bysubprogram.name = 'available'
# use self.requests_allowed and join this all up
nscheduled_requests_bysubprogram = \
dft.loc[dft['scheduled'],['program_id','subprogram_name']].groupby(['program_id','subprogram_name']).agg(len)
nscheduled_requests_bysubprogram.name = 'scheduled'
# reformat requests_allowed for joining
mux = pd.MultiIndex.from_tuples(self.requests_allowed.keys(),
names = ['program_id','subprogram_name'])
df_allowed = pd.DataFrame(list(self.requests_allowed.values()),
index=mux,columns=['allowed'])
df_summary = df_allowed.join(tot_avail_requests_bysubprogram).join(nscheduled_requests_bysubprogram)
self.logger.info(df_summary)
self.logger.info(f'{n_requests_scheduled} requests scheduled')
self.logger.info(f'{total_metric_value:.2f} total metric value; '
f'{avg_metric_value:.2f} average per request')
# this is not ideal for
tnow = current_state['current_time']
yymmdd = tnow.iso.split()[0][2:].replace('-','')
solution_outfile = f'{BASE_DIR}/../sims/gurobi_solution_{yymmdd}.csv'
before_noon_utc = (tnow.mjd - np.floor(tnow.mjd)) < 0.5
# avoid clobbering the solution file with restarts after observing has
# completed
if before_noon_utc or (not os.path.exists(solution_outfile)):
dft.drop(columns=['Yrtf']).to_csv(solution_outfile)
def _sequence_requests_in_block(self, current_state):
"""Solve the TSP for requests in this slot"""
self.queue_slot = block_index(current_state['current_time'])[0]
# raise an error if there are missing blocks--potentially due to
# excluded blocks
if self.queue_slot not in self.queued_requests_by_slot.index:
raise QueueEmptyError(f"Current block {self.queue_slot} is not stored")
# retrieve requests to be observed in this block
req_list = self.queued_requests_by_slot.loc[self.queue_slot]
# request_set ids should be unique per block
assert( (len(set(req_list)) == len(req_list) ) )
if np.all(np.isnan(req_list)):
raise QueueEmptyError("No requests assigned to this block")
idx = pd.Index(req_list)
# reconstruct
df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
az = self.fields.block_az[self.queue_slot]
df = df.join(az, on='field_id')
        # now prepend the CALSTOW position so we can minimize slew from
# filter exchanges
# Need to use current HA=0
df_blockstart = pd.DataFrame({'ra':HA_to_RA(0,
current_state['current_time']).to(u.degree).value,
'dec':-48.,'azimuth':180.},index=[0])
df_fakestart = pd.concat([df_blockstart,df],sort=True)
# compute overhead time between all request pairs
# compute pairwise slew times by axis for all pointings
slews_by_axis = {}
def coord_to_slewtime(coord, axis=None):
c1, c2 = np.meshgrid(coord, coord)
dangle = np.abs(c1 - c2)
angle = np.where(dangle < (360. - dangle), dangle, 360. - dangle)
return slew_time(axis, angle * u.deg)
slews_by_axis['dome'] = coord_to_slewtime(
df_fakestart['azimuth'], axis='dome')
slews_by_axis['dec'] = coord_to_slewtime(
df_fakestart['dec'], axis='dec')
slews_by_axis['ra'] = coord_to_slewtime(
df_fakestart['ra'], axis='ha')
maxradec = np.maximum(slews_by_axis['ra'], slews_by_axis['dec'])
maxslews = np.maximum(slews_by_axis['dome'], maxradec)
# impose a penalty on zero-length slews (which by construction
# in this mode are from different programs)
wnoslew = maxslews == 0
maxslews[wnoslew] = READOUT_TIME * 10.
overhead_time = np.maximum(maxslews, READOUT_TIME)
tsp_order, tsp_overhead_time = tsp_optimize(overhead_time.value)
# remove the fake starting point. tsp_optimize always starts with
# the first observation in df, which by construction is our fake point,
# so we can simply cut it off.
tsp_order = tsp_order[1:]
assert(0 not in tsp_order)
# tsp_order is 0-indexed from overhead time, so I need to
# reconstruct the request_id
self.queue_order = df_fakestart.index.values[tsp_order]
self.queue = df
def _move_requests_to_missed_obs(self, queue_slot):
"""After a block is expired, move any un-observed requests into the missed_obs queue."""
#self.queue should have any remaining obs
if len(self.queue):
cols = ['program_id', 'subprogram_name', 'program_pi', 'field_id',
'intranight_gap_min', 'exposure_time', 'priority']
# it's a little confusing, because each queue entry has all of the
# filter_ids from the original request set. So we have to
# make a pool that only has single filters in it.
filter_id = int(self.filter_by_slot[queue_slot])
missed_obs = self.queue.loc[:,cols].copy()
missed_obs['filter_ids'] = pd.Series([[filter_id] for i in missed_obs.index],index=missed_obs.index)
missed_obs['total_requests_tonight'] = 1
self.logger.info(f"Saving {len(missed_obs)} requests (filter {filter_id}) to the missed_obs queue: {missed_obs.loc[:,['subprogram_name','field_id']]}")
# the missed obs RequestPool wants request *sets*, so find out
# if previous requests were missed
rows_to_append = []
for idx, row in missed_obs.iterrows():
if idx in self.missed_obs_queue.rp.pool.index:
                assert(len(self.missed_obs_queue.rp.pool.loc[[idx]]) == 1)
self.missed_obs_queue.rp.pool.loc[idx,'filter_ids'].append(filter_id)
self.missed_obs_queue.rp.pool.loc[idx,'total_requests_tonight'] += 1
else:
rows_to_append.append(row)
self.missed_obs_queue.rp.pool = self.missed_obs_queue.rp.pool.append(rows_to_append)
else:
self.logger.debug(f'No remaining queued observations in slot {queue_slot}')
def _remove_requests(self, request_set_id):
"""Remove a request from both the queue and the pool.
Note that gurobi queue uses request_set_id to index."""
# should be the topmost item
assert (self.queue_order[0] == request_set_id)
self.queue_order = self.queue_order[1:]
row = self.queue.loc[request_set_id]
self.queue = self.queue.drop(request_set_id)
# (past slot assignments are still in self.queued_requests_by_slot)
# (we will only reuse the RequestPool if we do recomputes)
self.rp.remove_request(request_set_id,
self.filter_by_slot.loc[self.queue_slot])
def _return_queue(self):
# start by setting up the current slot
if len(self.queue) > 0:
queue = self.queue.loc[self.queue_order].copy()
queue.loc[:,'ordered'] = True
queue.loc[:,'slot_start_time'] = block_index_to_time(
self.queue_slot, Time.now(), where='start').iso
else:
# before the night starts, the queue is empty
queue = self.queue.copy()
# now loop over upcoming slots, ensuring they are sorted (should be)
slots = self.queued_requests_by_slot.index.values
slots = np.sort(slots)
for slot in slots:
if (self.queue_slot is not None):
if slot <= self.queue_slot:
continue
slot_requests = self.queued_requests_by_slot.loc[slot]
idx = pd.Index(slot_requests)
# reconstruct
df = self.rp.pool.loc[idx].join(self.fields.fields, on='field_id').copy()
df.loc[:,'filter_id'] = self.filter_by_slot[slot]
df.loc[:,'ordered'] = False
df.loc[:,'slot_start_time'] = block_index_to_time(slot,
Time.now(), where='start').iso
queue = queue.append(df)
return queue
class GreedyQueueManager(QueueManager):
def __init__(self, queue_name, queue_configuration, **kwargs):
super().__init__(queue_name, queue_configuration, **kwargs)
self.time_of_last_filter_change = None
self.min_time_before_filter_change = TIME_BLOCK_SIZE
self.queue_type = 'greedy'
def _assign_nightly_requests(self, current_state,
time_limit = 30.*u.second, block_use = defaultdict(float)):
# initialize the time of last filter change
if self.time_of_last_filter_change is None:
self.time_of_last_filter_change = current_state['current_time']
def _next_obs(self, current_state, obs_log):
"""Select the highest value request."""
# since this is a greedy queue, we update the queue after each obs
# for speed, only do the whole recalculation if we're in a new slot
# if ((block_index(current_state['current_time'])[0] != self.queue_slot)
# or (len(self.queue) == 0)):
# self._update_queue(current_state)
# else:
# # otherwise just recalculate the overhead times
# _ = self._update_overhead(current_state)
# to get the "on the fly" cadence windows to work I have to
# run the whole queue every time right now...
self._update_queue(current_state, obs_log)
# in case this wasn't initialized by assign_nightly_requests
if self.time_of_last_filter_change is None:
self.time_of_last_filter_change = current_state['current_time']
# check if filter changes are allowed yet
if ((current_state['current_time'] - self.time_of_last_filter_change)
< self.min_time_before_filter_change):
# only consider observations in the current filter
queue = self.queue[self.queue['filter_id'] == current_state['current_filter_id']]
# unless there are no more observations, in which case allow a
# change
if len(queue) == 0:
queue = self.queue
else:
# allow filter changes if desired
queue = self.queue
# request_id of the highest value request
max_idx = queue.value.idxmax()
row = queue.loc[max_idx]
next_obs = {'target_field_id': row['field_id'],
'target_ra': row['ra'],
'target_dec': row['dec'],
'target_filter_id': row['filter_id'],
'target_program_id': row['program_id'],
'target_subprogram_name': row['subprogram_name'],
'target_program_pi': row['program_pi'],
'target_exposure_time': row['exposure_time'] * u.second,
'target_sky_brightness': row['sky_brightness'],
'target_limiting_mag': row['limiting_mag'],
'target_metric_value': row['value'],
'target_total_requests_tonight': row['total_requests_tonight'],
'target_mode_num': 0,
'target_num_images': 1,
'request_id': max_idx}
return next_obs
def _metric(self, df):
"""Calculate metric for prioritizing fields.
Penalizes volume for both extinction (airmass) and fwhm penalty
due to atmospheric refraction, plus sky brightness from
moon phase and distance, overhead time
        == 1 for 21st mag, 10 sec overhead.
Normalize by value at transit."""
return 10.**(0.6 * (df['limiting_mag'] - 21)) / \
(1-1e-4*(maximum_altitude(df['dec']) - 90)**2.) / \
((EXPOSURE_TIME.value + df['overhead_time']) /
(EXPOSURE_TIME.value + 10.))
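    # Worked example (illustrative numbers only, assuming a 30 s EXPOSURE_TIME):
    # a limiting_mag = 21 field observed at its transit altitude with a 10 s
    # overhead has value 1 / 1 / 1 = 1; a 60 s slew-plus-filter-change overhead
    # divides that value by (30 + 60) / (30 + 10) = 2.25.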
def _update_overhead(self, current_state, df=None):
"""recalculate overhead values without regenerating whole queue"""
inplace = df is None
if inplace:
# no dataframe supplied, so replace existing self.queue on exit
df = self.queue
df.drop(['overhead_time', 'altitude', 'azimuth'], axis=1,
inplace=True)
# compute readout/slew overhead times, plus current alt/az
df_overhead, df_altaz = self.fields.overhead_time(current_state)
# nb: df has index request_id, not field_id
df = pd.merge(df, df_overhead, left_on='field_id', right_index=True)
df = pd.merge(df, df_altaz, left_on='field_id', right_index=True)
df.rename(columns={'alt': 'altitude', 'az': 'azimuth'}, inplace=True)
# add overhead for filter changes
w = df['filter_id'] != current_state['current_filter_id']
if np.sum(w):
df.loc[w, 'overhead_time'] += FILTER_CHANGE_TIME.to(u.second).value
if inplace:
df.loc[:, 'value'] = self._metric(df)
self.queue = df
return df
def _update_queue(self, current_state, obs_log):
"""Calculate greedy weighting of requests in the Pool using current
telescope state only"""
# store block index for which these values were calculated
self.queue_slot = block_index(current_state['current_time'])[0]
# check that the pool has fields in it
if len(self.rp.pool) == 0:
raise QueueEmptyError("No fields in pool")
# join with fields so we have the information we need
# make a copy so rp.pool and self.queue are not linked
df_rs = self.rp.pool.join(self.fields.fields, on='field_id').copy()
# now expand the dataframe of request sets to a dataframe with one
# row per obs.
requests = []
for request_set_id, row in df_rs.iterrows():
rdict = row.to_dict()
filter_ids = rdict.pop('filter_ids')
for filter_id in filter_ids:
ri = rdict.copy()
ri['filter_id'] = filter_id
ri['request_set_id'] = request_set_id
requests.append(ri)
df = pd.DataFrame(requests)
df = self._update_overhead(current_state, df=df)
# start with conservative altitude cut;
# airmass weighting applied naturally below
# also make a copy because otherwise it retains knowledge of
# (discarded) previous reference and raises SettingWithCopyWarnings
df = df.loc[df['altitude'] > 20, :].copy()
if len(df) == 0:
raise QueueEmptyError("No fields in queue above altitude cut")
# if restricting to one program per block, drop other programs
if self.block_programs:
current_block_program = PROGRAM_BLOCK_SEQUENCE[
self.queue_slot % LEN_BLOCK_SEQUENCE]
df = df.loc[df['program_id'] == current_block_program, :]
cadence_cuts = enough_gap_since_last_obs(df,
current_state,obs_log)
self.requests_in_window = np.sum(cadence_cuts) > 0
        if not self.requests_in_window:
self.logger.warning(calc_queue_stats(df, current_state,
intro="No fields with observable cadence windows. Queue in progress:"))
raise QueueEmptyError("No fields with observable cadence windows")
# also make a copy because otherwise it retains knowledge of
# (discarded) previous reference and raises SettingWithCopyWarnings
df = df.loc[cadence_cuts, :].copy()
# compute airmasses by field_id
# airmass = zenith_angle_to_airmass(90. - df_alt)
# airmass.name = 'airmass'
# df = pd.merge(df, pd.DataFrame(airmass),
# left_on='field_id', right_index=True)
# airmass cut (or add airmass weighting to value below)
# df = df[(df['airmass'] <= MAX_AIRMASS) & (df['airmass'] > 0)]
df_limmag, df_sky = compute_limiting_mag(df,
current_state['current_time'], self.fields.Sky)
df.loc[:, 'limiting_mag'] = df_limmag
df.loc[:, 'sky_brightness'] = df_sky
#df_limmag.name = 'limiting_mag'
#df = pd.merge(df, df_limmag, left_on='field_id', right_index=True)
df.loc[:, 'value'] = self._metric(df)
self.queue = df
def _remove_requests(self, request_id):
"""Remove a request from both the queue and the request pool"""
row = self.queue.loc[request_id]
self.queue = self.queue.drop(request_id)
self.rp.remove_request(row['request_set_id'], row['filter_id'])
def _return_queue(self):
if 'value' in self.queue.columns:
queue = self.queue.sort_values('value',ascending=False).copy()
else:
queue = self.queue.copy()
# we have put these in value order but the sequence can change
queue['ordered'] = False
return queue
class ListQueueManager(QueueManager):
"""Simple Queue that returns observations in order."""
def __init__(self, queue_name, queue_configuration, fields=None, **kwargs):
self.queue_type = 'list'
# queue name (useful in Scheduler object when swapping queues)
self.queue_name = queue_name
if fields is None:
self.fields = Fields()
else:
self.fields = fields
# the queue itself
self.load_list_queue(queue_configuration.config['targets'])
if 'validity_window_mjd' in queue_configuration.config:
window = queue_configuration.config['validity_window_mjd']
if window is not None:
assert(len(window) == 2)
assert(window[1] > window[0])
self.validity_window = [Time(window[0],format='mjd'),
Time(window[1],format='mjd')]
else:
self.validity_window = None
else:
self.validity_window = None
self.is_TOO = queue_configuration.config['targets'][0]['subprogram_name'].startswith('ToO')
def _assign_nightly_requests(self, current_state,
**kwargs):
pass
def _update_queue(self, current_state, obs_log):
pass
def load_list_queue(self, queue_dict_list, append=False):
"""Initialize an ordered queue.
queue_dict_list is a list of dicts, one per observation"""
df = pd.DataFrame(queue_dict_list)
# check that major columns are included
required_columns = ['field_id','program_id', 'subprogram_name',
'filter_id', 'program_pi']
for col in required_columns:
if col not in df.columns:
raise ValueError(f'Missing required column {col}')
# by default use field ids alone to specify pointings,
# but allow manual ra/dec if needed
if ('ra' not in df.columns) and ('dec' not in df.columns):
queue = df.join(self.fields.fields, on='field_id', how='inner').sort_index().copy()
else:
queue = df
# if some of the field ids are bad, there will be missing rows
if len(queue) != len(df):
raise ValueError('One or more field ids are malformed: {}'.format(
df.index.difference(self.fields.fields.index)))
# add standard keywords if not present
if 'exposure_time' not in queue.columns:
queue['exposure_time'] = EXPOSURE_TIME.to(u.second).value
if 'max_airmass' not in queue.columns:
queue['max_airmass'] = MAX_AIRMASS
if 'n_repeats' not in queue.columns:
queue['n_repeats'] = 1
if 'mode_num' not in queue.columns:
queue['mode_num'] = 0
if 'ewr_num_images' not in queue.columns:
queue['num_images'] = 1
else:
queue['num_images'] = queue['ewr_num_images']
if append:
self.queue = self.queue.append(queue, ignore_index=True)
else:
self.queue = queue
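    # Hypothetical target list sketch (all values are placeholders):
    #
    #   targets = [{'field_id': 600, 'program_id': 2,
    #               'subprogram_name': 'ToO_example', 'filter_id': 2,
    #               'program_pi': 'ExamplePI'}]
    #   self.load_list_queue(targets)
    #
    # ra/dec, exposure_time, and the other optional keywords are filled in from
    # the field grid and the defaults above when they are not supplied.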
def _next_obs(self, current_state, obs_log):
"""Return the next observation in the time ordered queue unless it has expired."""
if len(self.queue) == 0:
raise QueueEmptyError("No more observations in queue!")
# take the next observation in line
idx = 0
while True:
if idx == len(self.queue):
raise QueueEmptyError("No valid observations in queue!")
ra = self.queue.iloc[idx].ra
ha = RA_to_HA(ra * u.degree, current_state['current_time']
).to(u.degree).wrap_at(180.*u.degree).value
dec = self.queue.iloc[idx].dec
sc = coord.SkyCoord(ra,dec, unit=u.deg)
airmass = altitude_to_airmass(
skycoord_to_altaz(sc,
current_state['current_time']).alt.to(u.deg).value)
if airmass >= self.queue.iloc[idx].max_airmass:
idx += 1
continue
# Reed limits |HA| to < 5.95 hours (most relevant for circumpolar
# fields not hit by the airmass cut)
if np.abs(ha) >= (5.95 * u.hourangle).to(u.degree).value:
idx += 1
continue
# 1) HA < -17.6 deg && Dec < -22 deg is rejected for both track & stow because of interference with FFI.
if (ha <= -17.6) & (dec <= -22):
idx += 1
continue
# West of HA -17.6 deg, Dec < -45 deg is rejected for tracking because of the service platform in the south.
if (ha >= -17.6) & (dec <= -45):
idx += 1
continue
# fabs(HA) > 3 deg is rejected for Dec < -46 to protect the shutter "ears".
if (np.abs(ha) >= 3.) & (dec <= -46):
idx += 1
continue
# dec > 87.5 is rejected
if (dec > 87.5):
idx += 1
continue
break
next_obs = {'target_field_id': int(self.queue.iloc[idx].field_id),
'target_ra': self.queue.iloc[idx].ra,
'target_dec': self.queue.iloc[idx].dec,
'target_filter_id': self.queue.iloc[idx].filter_id,
'target_program_id': int(self.queue.iloc[idx].program_id),
'target_subprogram_name': self.queue.iloc[idx].subprogram_name,
'target_program_pi': self.queue.iloc[idx].program_pi,
'target_exposure_time': self.queue.iloc[idx].exposure_time * u.second,
'target_sky_brightness': 0.,
'target_limiting_mag': 0.,
'target_metric_value': 0.,
'target_total_requests_tonight': 1,
'target_mode_num': int(self.queue.iloc[idx].mode_num),
'target_num_images': int(self.queue.iloc[idx].num_images),
'request_id': self.queue.index[idx]}
return next_obs
def _remove_requests(self, request_id):
"""Remove a request from the queue"""
try:
if self.queue.loc[request_id,'n_repeats'] > 1:
self.queue.loc[request_id,'n_repeats'] -= 1
else:
self.queue = self.queue.drop(request_id)
except Exception:
self.logger.exception(f'Failure removing request {request_id}')
def _return_queue(self):
# by construction the list queue is already in order
queue = self.queue.copy()
queue['ordered'] = True
return queue
class RequestPool(object):
def __init__(self):
# initialize empty dataframe to add to
self.pool = pd.DataFrame()
pass
def add_request_sets(self, program_id, subprogram_name, program_pi,
field_ids, filter_ids, intranight_gap, exposure_time,
total_requests_tonight, priority=1):
"""program_ids must be scalar"""
assert (scalar_len(program_id) == 1)
assert (scalar_len(subprogram_name) == 1)
n_fields = scalar_len(field_ids)
if n_fields == 1:
# see if it's iterable or not
try:
iterator = iter(field_ids)
except TypeError:
# if not, assume it's a scalar and wrap in a list
field_ids = [field_ids]
# build df as a list of dicts
request_sets = []
for i, field_id in enumerate(field_ids):
request_sets.append({
'program_id': program_id,
'subprogram_name': subprogram_name,
'program_pi': program_pi,
'field_id': field_id,
'filter_ids': filter_ids.copy(),
# pandas doesn't play well with astropy quantities, so change
# back to seconds
'intranight_gap_min': intranight_gap.to(u.minute).value,
'exposure_time': exposure_time.to(u.second).value,
'total_requests_tonight': total_requests_tonight,
'priority': priority})
self.pool = self.pool.append(pd.DataFrame(request_sets),
ignore_index=True)
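    # Hypothetical usage sketch (all values are placeholders):
    #
    #   rp = RequestPool()
    #   rp.add_request_sets(1, 'example_subprogram', 'ExamplePI',
    #                       [600, 601], [1, 2],
    #                       60 * u.minute, 30 * u.second,
    #                       total_requests_tonight=4)
    #
    # The astropy quantities are converted to plain minutes and seconds before
    # being stored in the pool DataFrame.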
def n_request_sets(self):
return len(self.pool)
def remove_request_sets(self, request_set_ids):
"""Remove completed or otherwise unwanted requests by request_id
request_ids : scalar or list
requests to drop (index of self.pool)"""
self.pool = self.pool.drop(request_set_ids)
def remove_request(self, request_set_id, filter_id):
"""Remove single completed request from a request set.
request_set_id: scalar
request set to modify (index of self.pool)
filter_id: scalar
filter_id of completed observation"""
rs = self.pool.loc[request_set_id].copy()
filters = rs['filter_ids']
# this is another step that shouldn't be necessary...
filters.remove(filter_id)
if len(filters) == 0:
self.remove_request_sets(request_set_id)
else:
self.pool.at[request_set_id, 'filter_ids'] = filters
def clear_all_request_sets(self):
self.pool = pd.DataFrame()
# utils for examining inputs
def calc_pool_stats(df, intro=""):
"""
df = Q.rp.pool"""
stats_str = intro + "\n"
stats_str += "\t{} request sets\n".format(len(df))
stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
for prog_id in PROGRAM_IDS:
w = df.program_id == prog_id
stats_str += "\tProgram {}:\n".format(prog_id)
stats_str += "\t\t{} request sets\n".format(np.sum(w))
stats_str += "\t\t{} unique fields\n".format(
len(set(df.loc[w, 'field_id'])))
stats_str += "\t\t{} median requests tonight per field\n".format(
np.median(df.loc[w, 'total_requests_tonight']))
return stats_str
def calc_queue_stats(df, current_state, intro=""):
"""
df = Q.queue"""
stats_str = intro + "\n"
stats_str += "\t{} queued requests\n".format(len(df))
stats_str += "\t{} unique fields\n".format(len(set(df.field_id)))
for prog_id in PROGRAM_IDS:
w = df.program_id == prog_id
stats_str += "\tProgram {}:\n".format(prog_id)
if np.sum(w) == 0:
stats_str += "\t\tNo queued requests!\n"
continue
stats_str += "\t\t{} requests\n".format(np.sum(w))
stats_str += "\t\t{} unique fields\n".format(
len(set(df.loc[w, 'field_id'])))
walt = w & (df.loc[w, 'altitude'] > 20)
stats_str += "\t\t{} fields above altitude cut\n".format(
np.sum(walt))
# wfirst = walt & (df.loc[walt, 'request_number_tonight'] == 1)
# stats_str += "\t\t{} requests awaiting first obs tonight\n".format(
# np.sum(wfirst))
return stats_str
| bsd-3-clause |
liuzhaoguo/FreeROI | froi/gui/component/unused/volumedintensitydialog.py | 6 | 2368 | __author__ = 'zhouguangfu'
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
class VolumeIntensityDialog(QDialog):
"""
    A dialog for displaying the intensity histogram of the current volume.
"""
def __init__(self, model,parent=None):
super(VolumeIntensityDialog, self).__init__(parent)
self._model = model
self._init_gui()
self._create_actions()
self._plot()
def _init_gui(self):
"""
Initialize GUI.
"""
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget,it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
# set the layout
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.setLayout(layout)
def _create_actions(self):
self._model.time_changed.connect(self._plot)
def _plot(self):
        '''Plot the intensity histogram of the current volume.'''
volume_data = self._model.data(self._model.currentIndex(),Qt.UserRole + 5)
if self._model.data(self._model.currentIndex(),Qt.UserRole + 8):
data = volume_data[:,:,:,self._model.get_current_time_point()]
self.points = data[data!=0]
# self.points = volume_data[volume_data[:,:,:,self._model.get_current_time_point()]!=0l,
# self._model.get_current_time_point()]
else:
self.points = volume_data[volume_data!=0]
# create an axis
ax = self.figure.add_subplot(111)
ax.hold(False)
ax.hist(self.points,50)
plt.xlabel("Intensity")
plt.ylabel("Number")
plt.grid()
self.canvas.draw()
def closeEvent(self, QCloseEvent):
self._model.time_changed.disconnect(self._plot)
| bsd-3-clause |
swharden/ROI-Analysis-Pipeline | pyLS/old/processFolders.py | 1 | 3097 | from pyLineScan import LineScan
import glob
import os
from PIL import Image
import matplotlib.pyplot as plt
import datetime
def analyzeSubfolders(folderParent,overwrite=False):
"""
    given a parent directory, perform automated linescan analysis on all sub-folders.
    Output data is saved in each linescan folder's 'analysis' sub-folder.
"""
folderParent=os.path.abspath(folderParent)
print("analyzing all linescans in",folderParent)
linescanFolders=sorted(os.listdir(folderParent))
for i,name in enumerate(linescanFolders):
if not name.startswith("LineScan-"):
continue
folderLinescan=os.path.join(folderParent,name)
print("PROCESSING LINESCAN %d OF %d: %s"%(i+1,len(linescanFolders),name))
folderOutput=os.path.join(folderLinescan,"analysis")
if not os.path.exists(folderOutput):
os.mkdir(folderOutput)
if overwrite or not os.path.exists(os.path.join(folderOutput,"fig_01_img.png")):
print(" analyzing linescan data...")
LS=LineScan(folderLinescan,baseline=None)
LS.allFigures()
plt.close('all')
if overwrite or not os.path.exists(os.path.join(folderOutput,"ref.png")):
refFigures=glob.glob(folderLinescan+"/References/*Window2*.tif")
if len(refFigures):
print(" generating reference figure...")
im=Image.open(refFigures[0])
im.save(os.path.join(folderOutput,"ref.png"))
def index(folderParent):
"""make index.html and stick it in the parent directory."""
timestamp=datetime.datetime.now().strftime("%I:%M %p on %B %d, %Y")
folders=os.listdir(folderParent)
out="<html><style>"
out+="""
img{
margin: 10px;
border: 1px solid black;
box-shadow: 5px 5px 10px rgba(0, 0, 0, .2);
}
"""
out+="</style><body>"
out+="<b style='font-size: 300%%'>boshLS</b><br><i>automatic linescan index generated at %s</i><hr><br>"%timestamp
for folder in sorted(folders):
if not folder.startswith("LineScan-"):
continue
path=os.path.abspath(folderParent+"/"+folder)
rel=folderParent+"/"+folder
out+="<div style='background-color: #336699; color: white; padding: 10px; page-break-before: always;'>"
out+="<span style='font-size: 200%%; font-weight: bold;'>%s</span><br>"%folder
out+="<code>%s</code></div>"%path
for fname in sorted(glob.glob(folderParent+"/"+folder+"/analysis/*.png")):
fname=os.path.basename(fname)
out+='<a href="%s/analysis/%s"><img src="%s/analysis/%s" height=300></a>'%(rel,fname,rel,fname)
out+="<br>"*6
out+="</code></body></html>"
fileOut=os.path.abspath(folderParent+"/index.html")
with open(fileOut,'w') as f:
f.write(out)
print("saved",fileOut)
if __name__=="__main__":
#folderParent='../data/linescan/realistic/'
folderParent=r'X:\Data\SCOTT\2017-06-16 OXT-Tom\2p'
analyzeSubfolders(folderParent,overwrite=False)
index(folderParent)
print("DONE") | mit |
VillarrealA/pyoptools | pyoptools/misc/pmisc/misc.py | 9 | 18011 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as N
from numpy import array, sin, cos, float64, dot, float_, sqrt, ceil, floor, dot, \
meshgrid, zeros, zeros_like, where, nan, pi, isnan, nonzero, rint, \
linspace, arange, argwhere
from numpy.ma import is_masked, MaskedArray
from numpy.ma import array as ma_array
#from enthought.traits.api import Trait, TraitHandler
from scipy import interpolate
from pylab import griddata, meshgrid
'''Auxiliary functions and classes
'''
#~ class TraitUnitVector(TraitHandler):
#~ ''' Class to define unit vector trait
#~
#~ Description:
#~
#~ This class defines a unit vector. If the value assigned is not a unit
#~ vector, it gets automatically normalized
#~ '''
#~
#~ def validate(self, object, name, value):
#~ try:
#~ avalue=array(value)
#~ except:
#~ self.error(object, name, value)
#~
#~ if len(avalue.shape)!=1 or avalue.shape[0]!=3:
#~ return self.error(object, name, avalue)
#~
#~ avalue=array(avalue/sqrt(dot(avalue,avalue)))
#~ return avalue
#~
#~ # Trait to define a unit vector based on the unit vector trait
#~ UnitVector = Trait(array([0,0,1], float_),TraitUnitVector())
#~ print "Nota: Hay que revisar las convenciones de las rotaciones para que queden\n\r "\
#~ "consistentes en x,y,z. Me parece que hay un error en el signo de la \n\r rotacion"\
#~ "al rededor de alguno de los ejes. Modulo misc \n\r"\
#~ "si no estoy mal el error esta en la rotacion respecto a los ejez Y y Z"
def rot_x(tx):
'''Returns the transformation matrix for a rotation around the X axis
'''
return array([[1.,0. ,0. ],
[0.,cos(tx),-sin(tx)],
[0.,sin(tx), cos(tx)]]).astype(float64)
def rot_y(ty):
'''Returns the transformation matrix for a rotation around the Y axis
'''
return array([[ cos(ty),0. ,sin(ty) ],
[ 0. ,1 ,0. ],
[-sin(ty),0. ,cos(ty) ]]).astype(float64)
def rot_z(tz):
'''Returns the transformation matrix for a rotation around the Z axis
'''
return array([[ cos(tz),-sin(tz),0. ],
[ sin(tz), cos(tz),0. ],
[ 0. ,0. ,1. ]]).astype(float64)
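# Illustrative example (angles assumed to be in radians): the three elementary
# rotations above can be composed into a single Z-Y-X rotation, which is what the
# commented-out rot_mat below computes, e.g.
#   R = dot(rot_z(tz), dot(rot_y(ty), rot_x(tx)))
# With tz=pi/2 and tx=ty=0, R maps the X unit vector (1,0,0) to approximately (0,1,0).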
#~ def rot_mat(r):
#~ '''Returns the transformation matrix for a rotation around the Z,Y,X axes
#~
#~ The rotation is made first around the Z axis, then around the Y axis, and
#~ finally around the X axis.
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[1. , 0., 0.],
#~ [0. , c[0],-s[0]],
#~ [0. , s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0., s[1]],
#~ [ 0., 1., 0.],
#~ [-s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2],-s[2], 0.],
#~ [ s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~
#~ tm=dot(rz,dot(ry,rx))
#~
#~ return tm
# To improve speed, this routine was moved to cmisc.pyx
#~ def rot_mat_i(r):
#~ '''Returns the inverse transformation matrix for a rotation around the Z,Y,X axes
#~
#~ Parameters
#~
#~ r= (rx,ry,rz)
#~ '''
#~
#~ c=cos(r)
#~ s=sin(r)
#~
#~ rx=array([[ 1., 0., 0.],
#~ [ 0., c[0], s[0]],
#~ [ 0.,-s[0], c[0]]])
#~
#~ ry=array([[ c[1], 0.,-s[1]],
#~ [ 0., 1., 0.],
#~ [ s[1], 0., c[1]]])
#~
#~
#~ rz=array([[ c[2], s[2], 0.],
#~ [-s[2], c[2], 0.],
#~ [ 0., 0., 1.]])
#~
#~ # Note: an optimization was attempted by writing out the dot product expression
#~ # explicitly, but the result was considerably slower; this needs review
#~
#~
#~ return dot(rx,dot(ry,rz))
def cross(a,b):
    '''3D vector (cross) product'''
x1,y1,z1=a
x2,y2,z2=b
return array((y1*z2-y2*z1,x2*z1-x1*z2,x1*y2-x2*y1))
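# Illustrative example: for the right-handed unit vectors,
#   cross((1, 0, 0), (0, 1, 0))
# returns array([0, 0, 1]), as expected for the vector (cross) product.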
def wavelength2RGB(wl):
    '''Function to approximate an RGB tuple from the wavelength value
    Parameter:
    wavelength: wavelength in um
    if the wavelength is outside the visible spectrum, returns (0,0,0)
    Original code found at:
http://www.physics.sfasu.edu/astro/color/spectra.html
'''
R,G,B=0.,0.,0.
if (wl>=.380) & (wl<.440):
R = -1.*(wl-.440)/(.440-.380)
G = 0.
B = 1.
if (wl>=.440) & (wl<.490):
R = 0.
G = (wl-.440)/(.490-.440)
B = 1.
if (wl>=.490) & (wl<.510):
R = 0.
G = 1.
B = -1.*(wl-.510)/(.510-.490)
if (wl>=.510) & (wl<.580):
R = (wl-.510)/(.580-.510)
G = 1.
B = 0.
if (wl>=.580) & (wl<.645):
R = 1.
G = -1.*(wl-.645)/(.645-.580)
B = 0.
if (wl>=.645) & (wl < .780):
R = 1.
G = 0.
B = 0.
# LET THE INTENSITY FALL OFF NEAR THE VISION LIMITS
if (wl>=.700):
sss =.3+.7* (.780-wl)/(.780-.700)
elif (wl < .420) :
sss=.3+.7*(wl-.380)/(.420-.380)
else :
sss=1
R=R*sss
G=G*sss
B=B*sss
return (R,G,B)
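# Illustrative example: wavelengths are given in micrometres, so green light at
# wl=0.55 falls in the .510-.580 branch and returns roughly (0.57, 1.0, 0.0);
# wavelengths outside the .380-.780 range return (0, 0, 0).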
def matrix_interpolation(M, i, j, type="bilinear"):
"""Returns the interpolated value of a matrix, when the indices i,j are floating
point numbers.
M
Matrix to interpolate
i,j
Indices to interpolate
type
Interpolation type. supported types: nearest,bilinear
"""
mi, mj=M.shape
if i<0 or i>mi-2 or j<0 or j>mj-2:
raise IndexError("matrix Indexes out of range")
# Allowed interpolation types
inter_types=["nearest","bilinear", ]
if not type in inter_types:
raise ValueError("Interpolation type not allowed. The allowed types"\
" are: {0}".format(inter_types))
if type=="nearest":
iri=int(round(i))
irj=int(round(j))
return M[iri, irj]
elif type=="bilinear":
i_s, j_s=floor((i, j))
#calc 1
m=M[i_s:i_s+2, j_s:j_s+2]
iv=array([1-(i-i_s), i-i_s])
jv=array([[1-(j-j_s),], [j-j_s, ]])
return dot(iv, dot(m, jv))[0]
#dx=i-i_s
#dy=j-j_s
##print i, j, i_s, j_s, dx, dy
#p1=dx*dy*M[i_s, j_s]
#p2=(1.-dx)*dy*M[i_s+1, j_s]
#p3=dx*(1.-dy)*M[i_s, j_s+1]
#p4=(1.-dx)*(1.-dy)*M[i_s+1, j_s+1]
#return p1+ p2+ p3+ p4
print "error"
return 1.
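# Illustrative example (assumes the legacy Python 2 / NumPy stack this module was
# written for): for the 3x3 matrix M = array([[0.,1.,2.],[3.,4.,5.],[6.,7.,8.]]),
#   matrix_interpolation(M, 0.5, 0.5, type="bilinear")
# gives 2.0, the average of the four surrounding entries 0, 1, 3 and 4.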
def hitlist2int(x, y, z, xi, yi):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
# Because of the triangulation algorithm, there are some really high values
    # in the intensity data. To filter these values, remove the highest-intensity
    # points (the top 10%, as set by ni below).
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
    #Interpolation with splines
#di=interpolate.SmoothBivariateSpline(xc, yc, I)
#d1=di(xi,yi)
    #nn interpolation, and pupil generation
xi,yi = meshgrid(xi,yi)
d1=griddata(xc, yc, I,xi, yi )
return d1
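# Usage sketch (relies on the deprecated matplotlib.delaunay and pylab.griddata
# APIs that were current when this module was written; hx, hy, hz are hypothetical
# arrays of ray-hit coordinates):
#   xi = linspace(-1., 1., 64)
#   yi = linspace(-1., 1., 64)
#   img = hitlist2int(hx, hy, hz, xi, yi)
# returns a 2D intensity map with the samples normalised to a maximum of 1.
# Note that the z argument is not used here: intensity is estimated from the
# areas of the Delaunay triangles formed by the x,y hit positions.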
def hitlist2int_list(x, y):
"""Function that estimates an intensity distribution on a plane from a
ray hitlist. Returns the intensity samples as an x,y,I list
"""
import matplotlib.delaunay as delaunay
from pylab import griddata, meshgrid
from scipy import interpolate
#if xi.ndim != yi.ndim:
# raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
#if xi.ndim != 1 and xi.ndim != 2:
# raise TypeError("inputs xi and yi must be 1D or 2D.")
#if not len(x)==len(y)==len(z):
# raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
# remove masked points.
#if hasattr(z,'mask'):
# x = x.compress(z.mask == False)
# y = y.compress(z.mask == False)
# z = z.compressed()
#if xi.ndim == 1:
# xi,yi = meshgrid(xi,yi)
#triangulate data
tri=delaunay.Triangulation(x, y)
#calculate triangles area
ntriangles=tri.circumcenters.shape[0]
coord=array(zip(tri.x, tri.y))
#I=zeros((ntriangles, ))
#xc=zeros((ntriangles, ))
#yc=zeros((ntriangles, ))
# for i in range(ntriangles):
# i1, i2, i3=tri.triangle_nodes[i]
# p1=coord[i1]
# p2=coord[i2]
# p3=coord[i3]
# v1=p1-p2
# v2=p3-p2
# I[i]=1./(abs(v1[0]*v2[1]-v1[1]*v2[0]))
# # the circumcenter data from the triangulation, has some problems so we
# # recalculate it
# xc[i], yc[i]=(p1+p2+p3)/3.
# The previous code was replaced by the following code
###
i1=tri.triangle_nodes[:, 0]
i2=tri.triangle_nodes[:, 1]
i3=tri.triangle_nodes[:, 2]
p1=coord[i1]
p2=coord[i2]
p3=coord[i3]
v1=p1-p2
v2=p3-p2
I=abs(1./(v1[:, 0]*v2[:, 1]-v1[:, 1]*v2[:, 0]))
c=(p1+p2+p3)/3.
xc=c[:, 0]
yc=c[:, 1]
###
# Because of the triangulation algorithm, there are some really high values
    # in the intensity data. To filter these values, remove the highest-intensity
    # points (the top 10%, as set by ni below).
ni=int(0.1*len(I))
j=I.argsort()[:-ni]
xc=xc[j]
yc=yc[j]
I=I[j]
I=I/I.max()
# #print tri.circumcenters[:, 0]
# #print tri.circumcenters.shape
# print ntriangles, tri.circumcenters[:, 0].shape, tri.circumcenters[:, 0].flatten().shape
#itri=delaunay.Triangulation(xc,yc)
#inti=itri.linear_interpolator(I)
#xi,yi = meshgrid(xi,yi)
#d1=itri(xi, yi)
    #Interpolation with splines
#di=interpolate.SmoothBivariateSpline(xc, yc, I)
#d1=di(xi,yi)
return xc,yc,I
def unwrapv(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the value given in uv
This is a vectorized routine, but is not as fast as it should
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph.copy()
size=fasei.shape
nx, ny=size
    # If the initial unwrapping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(size[0]/2),int(size[1]/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=N.zeros(size)
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
XI_, YI_= meshgrid(range(-1, 2), range(-1, 2))
XI_=XI_.flatten()
YI_=YI_.flatten()
while len(l_un)>0:
# remove the first value from the list
unp=l_un.pop(0)
#l_un[0:1]=[]
XI=XI_+unp[0]
YI=YI_+unp[1]
#Remove from the list the values where XI is negative
nxi=XI>-1
nyi=YI>-1
nxf=XI<nx
nyf=YI<ny
n=nonzero(nxi& nyi & nxf & nyf)
lco=zip(XI[n], YI[n])
        # Put the coordinates of the not-yet-unwrapped neighbors in the list
        # and check for wrapping against the already unwrapped neighbors
nv=0
wv=0
for co in lco:
if (fl[co]==0) & (faseo.mask[co]==False):
fl[co]=1
l_un.append(co)
elif fl[co]==2:
wv=wv+rint((faseo[co]-faseo[unp])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
#if wv>=0: wv=int(wv+0.5)
#else: wv=int(wv-0.5)
fl[unp]=2
faseo[unp]=faseo[unp]+wv*uv
return faseo
def unwrap_py(inph,in_p=(), uv=2*pi):
"""Return the input matrix unwraped the valu given in uv
The same as unwrapv, but using for-s, written in python
"""
if not is_masked(inph):
fasei=MaskedArray(inph, isnan(inph))
else:
fasei=inph
nx, ny=(fasei.shape[0],fasei.shape[1])
    # If the initial unwrapping point is not given, take the center of the image
# as initial coordinate
if in_p==():
in_p=(int(nx/2),int(ny/2))
# Create a temporal space to mark if the points are already unwrapped
# 0 the point has not been unwrapped
# 1 the point has not been unwrapped, but it is in the unwrapping list
# 2 the point was already unwrapped
fl=zeros((nx, ny))
# List containing the points to unwrap
l_un=[in_p]
fl[in_p]=1
# unwrapped values
faseo=fasei.copy()
while len(l_un)>0:
# remove the first value from the list
cx, cy=l_un.pop(0)
        # Put the coordinates of the not-yet-unwrapped neighbors in the list
        # and check for wrapping against the already unwrapped neighbors
nv=0
wv=0
for i in range(cx-1, cx+2):
for j in range(cy-1, cy+2):
if (i>-1) and (i<nx) and (j>-1) and (j<ny):
if (fl[i, j]==0)&(faseo.mask[i, j]==False):
fl[i, j]=1
l_un.append((i, j))
elif fl[i, j]==2:
wv=wv+rint((faseo[i, j]-faseo[cx, cy])/uv)
nv=nv+1
if nv!=0:
wv=wv/nv
fl[cx, cy]=2
faseo[cx, cy]=faseo[cx, cy]+wv*uv
return faseo
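# Usage sketch: both unwrapv and unwrap_py expect a 2D phase map (optionally a
# masked array) whose true neighbouring differences are smaller than uv/2, e.g.
#   wrapped = true_phase % (2*pi)     # 'true_phase' is a hypothetical smooth map
#   restored = unwrap_py(wrapped)
# For such data the result matches true_phase up to a constant multiple of 2*pi,
# anchored at the starting point in_p (the image centre by default).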
def interpolate_g(xi,yi,zi,xx,yy,knots=10, error=False,mask=None):
"""Create a grid of zi values interpolating the values from xi,yi,zi
    xi,yi,zi 1D lists or arrays containing the values to use as the base for the interpolation
    xx,yy 1D vectors or lists containing the output coordinates
    mask optional mask for the output array; when given, a masked array is returned
knots number of knots to be used in each direction
error if set to true, half of the points (x, y, z) are used to create
the interpolation, and half are used to evaluate the interpolation error
"""
xi=array(xi)
yi=array(yi)
zi=array(zi)
#print xi
#print yi
#print zi
assert xi.ndim==1 ,"xi must ba a 1D array or list"
assert yi.ndim==1 ,"yi must ba a 1D array or list"
assert zi.ndim==1 ,"zi must ba a 1D array or list"
assert xx.ndim==1 ,"xx must ba a 1D array or list"
assert yy.ndim==1 ,"yy must ba a 1D array or list"
assert len(xi)==len(yi) and len(xi)==len(zi), "xi, yi, zi must have the same number of items"
if error==True:
# Create a list of indexes to be able to select the points that are going
# to be used as spline generators, and as control points
idx=where(arange(len(xi)) %2 ==0, False, True)
# Use only half of the samples to create the Spline,
if error == True:
isp=argwhere(idx==True)
ich=argwhere(idx==False)
xsp=xi[isp]
ysp=yi[isp]
zsp=zi[isp]
xch=xi[ich]
ych=yi[ich]
zch=zi[ich]
else:
xsp=xi
ysp=yi
zsp=zi
#Distribute homogeneously the knots
xk=linspace(xsp.min(), xsp.max(),knots)
yk=linspace(ysp.min(), ysp.max(),knots)
# LSQBivariateSpline using some knots gives smaller error than
# SmoothBivariateSpline
di=interpolate.LSQBivariateSpline(xsp, ysp, zsp, xk[1:-1], yk[1:-1])
#print xsp,ysp,zsp
#di=interpolate.SmoothBivariateSpline(xsp, ysp, zsp)
# Evaluate error
if error==True:
zch1=di.ev(xch, ych)
er=(zch.flatten()-zch1).std()
if mask==None:
#d=griddata(xi, yi, zi, xx, yy) #
d=di(xx,yy).transpose()
else:
d=ma_array(di(xx,yy).transpose(), mask=mask)
if error==True: return d, er
else: return d
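# Usage sketch: with scattered samples xi, yi, zi and 1D output axes, something like
#   zz = interpolate_g(xi, yi, zi, linspace(0., 1., 50), linspace(0., 1., 50))
# returns a 50x50 array of spline-interpolated values; with error=True it also
# returns the standard deviation of the residuals at the withheld control points.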
####### End of auxiliary functions
| bsd-3-clause |
llenfest/programingworkshop | Python/pandas_and_parallel/plotting.py | 8 | 2827 | import pandas as pd
import os
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
from scipy import interpolate
from mpl_toolkits.basemap import Basemap, cm
def sfc_plot(starttime, endtime, variables, variablest, locations,
met, xi, yi, xmin, xmax, ymin, ymax):
''' Script for plotting the mesonet data with wind barbs over a
county map in a given time interval
'''
interval = int((endtime - starttime).total_seconds()/300)
z_max = np.max(met[variablest[1]])
z_min = np.min(met[variablest[1]])
levels = np.arange(z_min, z_max+0.1, 0.1)
shapefile = 'UScounties/UScounties'
if not os.path.exists('%s' %(variables)):
os.makedirs('%s' %(variables))
for i in range(interval):
time_selection = starttime + dt.timedelta(minutes=5*i)
zi = interpolate.griddata((met.ix[time_selection]['Lon'],
met.ix[time_selection]['Lat']),
met.ix[time_selection][variablest[1]],
(xi, yi), method='linear')
maps = Basemap(llcrnrlon=xmin, llcrnrlat=ymin,
urcrnrlon=xmax, urcrnrlat=ymax, projection='cyl')
maps.readshapefile(shapefile, name='counties')
if (variables == 'dew_point'):
maps.contourf(xi, yi, zi, levels, cmap=plt.cm.gist_earth_r)
if (variables == 'temperature'):
maps.contourf(xi, yi, zi, levels, cmap=plt.cm.jet)
if variables == 'rainfall':
maps.contourf(xi, yi, zi, levels, cmap=plt.cm.YlGn)
if ((variables == 'pressure') or (variables == 'wind_speed') or
(variables == 'gust_speed')):
maps.contourf(xi, yi, zi, levels, cmap=plt.cm.gist_earth)
c = plt.colorbar()
c.set_label(variablest[0])
maps.scatter(met.ix[time_selection]['Lon'],
met.ix[time_selection]['Lat'], latlon=True, marker='o', c='b', s=5)
maps.barbs(met.ix[time_selection]['Lon'],
met.ix[time_selection]['Lat'],
met.ix[time_selection]['u'].values*1.94384,
met.ix[time_selection]['v'].values*1.94384, latlon=True)
maps.drawparallels(np.arange(31.,36,1.), color='0.5',
labels=[1,0,0,0], fontsize=10)
maps.drawmeridians(np.arange(-104.,-98.,1.), color='0.5',
labels=[0,0,0,1], fontsize=10)
plt.title(variablest[1])
filename = '%s_%s.png' % (variables,
time_selection.strftime('%Y%m%d_%H%M'))
plt.tight_layout()
plt.savefig(variables + '/' + filename, dpi=150)
plt.clf()
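# Usage sketch (the names below are assumptions, not part of this script): 'met'
# is a pandas DataFrame indexed by observation time with 'Lat', 'Lon', 'u', 'v'
# and the plotted column, and xi, yi come from np.meshgrid over the map extent:
#   xi, yi = np.meshgrid(np.linspace(xmin, xmax, 100), np.linspace(ymin, ymax, 100))
#   sfc_plot(start, end, 'temperature', ('Temperature (C)', 'Temp'), locations,
#            met, xi, yi, xmin, xmax, ymin, ymax)
# One PNG per 5-minute step is written into a folder named after the variable.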
| mit |
ubenu/Blits | src/blitspak/blits.py | 1 | 50733 | """
Blits:
Created on 23 May 2017
Original Blivion:
Created on Tue Oct 25 13:11:32 2016
@author: Maria Schilstra
"""
#from PyQt5.uic import loadUiType
from PyQt5 import QtCore as qt
from PyQt5 import QtWidgets as widgets
from PyQt5 import QtGui as gui
import pandas as pd, numpy as np, copy as cp
from blitspak.blits_mpl import MplCanvas, NavigationToolbar
from blitspak.blits_data import BlitsData
from blitspak.function_dialog import FunctionSelectionDialog
from blitspak.data_creation_dialog import DataCreationDialog
from functions.framework import FunctionsFramework
# from PyQt5.uic import loadUiType
# Ui_MainWindow, QMainWindow = loadUiType('..\\Resources\\UI\\blits.ui')
# Original:
# To avoid using .ui file (from QtDesigner) and loadUIType,
# created a python-version of the .ui file using pyuic5 from command line
# Here: pyuic5 blits.ui -o blits_ui.py
# Also: cannot (easily) use .qrc file, so need to create _rc.py file
# with icon definitions: pyrcc5 -o blits_rc.py blits.qrc
# Then import .py package, as below.
# (QMainWindow is a QtWidget; UI_MainWindow is generated by the converted .ui)
import blitspak.blits_ui as ui
class Main(widgets.QMainWindow, ui.Ui_MainWindow):
N_STATES = 5
ST_START, ST_DATA_ONLY, FUNCTION_ONLY, ST_READY, REJECT = range(N_STATES)
N_PS_SPECTYPES = 7
PS_VALUES, PS_LEDITS, PS_VALUE_FIXED, PS_FIX_CBOXES, PS_GROUPS, PS_COMBOS, PS_SIGMAS = range(N_PS_SPECTYPES)
N_P_SPECTYPES = 4
P_ALL_FIXED, P_FIX_CBOXES, P_ALL_LINKED, P_LINK_CBOXES = range(N_P_SPECTYPES)
N_S_SPECTYPES = 3
S_INCLUDED, S_INCLUDE_CBOXES, S_FTOL = range(N_S_SPECTYPES)
ps_types = ['param_values', 'param_line_edits', 'param_values_fixed', 'param_fix_cboxes', 'series_groups', 'series_combos', 'sigmas']
s_types = ['included', 'included_cboxes', 'ftol']
p_types = ['all_fixed', 'all_fixed_cboxes', 'all_linked', 'all_linked_cboxes']
def __init__(self, ):
super(Main, self).__init__()
self.setupUi(self)
self.scrutinize_dialog = None
self.function_dialog = None
self.create_data_set_dialog = None
self.canvas = MplCanvas(self.mpl_window)
self.plot_toolbar = NavigationToolbar(self.canvas, self.mpl_window)
self.mpl_layout.addWidget(self.canvas)
self.grp_show_axis = widgets.QGroupBox()
self.axis_layout = widgets.QHBoxLayout()
self.grp_show_axis.setLayout(self.axis_layout)
self.grp_show_axis.setSizePolicy(widgets.QSizePolicy.Maximum, widgets.QSizePolicy.Maximum)
self.axisgrp_layout = widgets.QHBoxLayout()
self.axisgrp_layout.addWidget(self.grp_show_axis)
self.mpl_layout.addLayout(self.axisgrp_layout)
self.mpl_layout.addWidget(self.plot_toolbar)
ft = gui.QFont('Calibri', 14)
self.btn_est = widgets.QPushButton("Estimate")
self.btn_est.setFont(ft)
self.btn_apply = widgets.QPushButton("Calculate")
self.btn_apply.setFont(ft)
self.btn_fit = widgets.QPushButton("Fit")
self.btn_fit.setFont(ft)
self.bbox_fit.addButton(self.btn_apply, widgets.QDialogButtonBox.ActionRole)
self.bbox_fit.addButton(self.btn_est, widgets.QDialogButtonBox.ActionRole)
self.bbox_fit.addButton(self.btn_fit, widgets.QDialogButtonBox.ActionRole)
self.action_open.triggered.connect(self.on_open)
self.action_create.triggered.connect(self.on_create)
self.action_close.triggered.connect(self.on_close_data)
self.action_save.triggered.connect(self.on_save)
self.action_select_function.triggered.connect(self.on_select_function)
self.action_analyze.triggered.connect(self.on_analyze)
self.action_quit.triggered.connect(self.close)
self.action_apply.triggered.connect(self.on_calculate)
self.action_estimate.triggered.connect(self.on_estimate)
self.btn_est.clicked.connect(self.on_estimate)
self.btn_apply.clicked.connect(self.on_calculate)
self.btn_fit.clicked.connect(self.on_analyze)
self.chk_global.stateChanged.connect(self.on_global_changed)
self.blits_data = BlitsData()
self.blits_fitted = BlitsData()
self.blits_residuals = BlitsData()
self.pn_fit_spec = None
self.df_params_spec = None
self.df_series_spec = None
self.df_xlimits = None
self.current_xaxis = None
self.axis_selector_buttons = None
self.current_function = None
self.nfitted_points = 100
self.npoints_max = 1000
self.current_state = self.ST_START
self.update_controls()
def init_fit_spec(self):
self.df_xlimits = None
self.pn_fit_spec = None
self.df_series_spec = None
self.df_params_spec = None
if self.current_state in (self.ST_READY, ):
series_names = self.blits_data.get_series_names()
param_names = self.current_function.get_parameter_names()
axis_names = self.blits_data.get_axes_names()
self.df_xlimits = pd.DataFrame(columns=['min', 'max'], index=axis_names)
mins, maxs = self.blits_data.series_extremes()
xmins, xmaxs = mins.iloc[:, :-1].min(axis=0), maxs.iloc[:, :-1].max(axis=0)
self.df_xlimits.loc[:, 'min'] = xmins
self.df_xlimits.loc[:, 'max'] = xmaxs
self.pn_fit_spec = pd.Panel(major_axis=param_names, minor_axis=series_names, items=self.ps_types)
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]] = 1.0
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED]] = qt.Qt.Unchecked
self.df_series_spec = pd.DataFrame(index=series_names, columns=self.s_types)
self.df_series_spec.loc[:, self.s_types[self.S_INCLUDED]] = qt.Qt.Checked
self.df_params_spec = pd.DataFrame(index=param_names, columns=self.p_types)
self.df_params_spec.loc[:, self.p_types[self.P_ALL_FIXED]] = qt.Qt.Unchecked
self.df_params_spec.loc[:, self.p_types[self.P_ALL_LINKED]] = qt.Qt.Unchecked
for sname in series_names:
cbx = widgets.QCheckBox()
cbx.setText("")
cbx.setToolTip("Uncheck to exclude from analysis")
cbx.setCheckState(int(self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDED]]))
# int() is necessary for the checkbox to recognise the type as valid (int64 isn't)
self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDE_CBOXES]] = cbx
cbx.stateChanged.connect(self.on_series_selected_changed)
for pname in param_names:
cb_lnk = widgets.QCheckBox()
cb_lnk.setCheckState(qt.Qt.Unchecked)
cb_lnk.setText("")
cb_lnk.setToolTip("Check to link " + pname + " across all series")
cb_lnk.stateChanged.connect(self.on_all_linked_changed)
cb_fix = widgets.QCheckBox()
cb_fix.setCheckState(qt.Qt.Unchecked)
cb_fix.setText("")
cb_fix.setToolTip("Check to keep " + pname + " constant for all series")
cb_fix.stateChanged.connect(self.on_all_fixed_changed)
self.df_params_spec.loc[pname, self.p_types[self.P_ALL_LINKED]] = int(cb_lnk.checkState())
self.df_params_spec.loc[pname, self.p_types[self.P_LINK_CBOXES]] = cb_lnk
self.df_params_spec.loc[pname, self.p_types[self.P_ALL_FIXED]] = int(cb_fix.checkState())
self.df_params_spec.loc[pname, self.p_types[self.P_FIX_CBOXES]] = cb_fix
for pname in param_names:
for sname in series_names:
edt = widgets.QLineEdit()
edt.setValidator(gui.QDoubleValidator())
edt.setText("{:.3g}".format(self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname]))
edt.textChanged.connect(self.on_param_val_changed)
cbx = widgets.QCheckBox()
cbx.setToolTip("Check to keep " + pname + " constant for series " + sname)
cbx.setCheckState(qt.Qt.Unchecked)
cbx.stateChanged.connect(self.on_param_fix_changed)
combo = widgets.QComboBox()
combo.addItems(series_names)
combo.setEditable(False)
combo.setCurrentText(sname)
combo.currentIndexChanged.connect(self.on_linkage_changed)
try:
sp_vals = [float(edt.text()), edt, cbx.checkState(), cbx, combo.currentText(), combo]
for sp, val in zip(self.ps_types, sp_vals):
self.pn_fit_spec.loc[sp, pname, sname] = val
except Exception as e:
print(e)
def init_ui(self):
self.tbl_series_links.clear()
self.tbl_series_links.setRowCount(0)
self.tbl_series_links.setColumnCount(0)
self.tbl_param_values.clear()
self.tbl_param_values.setRowCount(0)
self.tbl_param_values.setColumnCount(0)
if self.current_state not in (self.ST_START, self.ST_DATA_ONLY,): # there is a current function
self.lbl_fn_name.setText("Selected function: " + self.current_function.name)
self.txt_description.setText(self.current_function.long_description)
else:
self.lbl_fn_name.setText("Selected function: None")
self.txt_description.setText("")
if self.current_state in (self.ST_READY, ):
if self.pn_fit_spec is not None:
params = self.pn_fit_spec.major_axis.values
series = self.pn_fit_spec.minor_axis.values
colours = self.canvas.curve_colours
ptbl_vheader = [widgets.QTableWidgetItem("All")]
for sname in series:
i = widgets.QTableWidgetItem(sname)
i.setIcon(self.line_icon(colours[sname]))
ptbl_vheader.extend([i])
self.tbl_param_values.setRowCount(len(ptbl_vheader))
for i in range(len(ptbl_vheader)):
self.tbl_param_values.setVerticalHeaderItem(i, ptbl_vheader[i])
ptbl_hheader = ["Include"]
ptbl_hheader.extend(params)
self.tbl_param_values.setColumnCount(len(ptbl_hheader))
self.tbl_param_values.setHorizontalHeaderLabels(ptbl_hheader)
ltbl_vheader = [widgets.QTableWidgetItem("All")]
for sname in series:
i = widgets.QTableWidgetItem(sname)
i.setIcon(self.line_icon(colours[sname]))
ltbl_vheader .extend([i])
self.tbl_series_links.setRowCount(len(ltbl_vheader))
for i in range(len(ltbl_vheader )):
self.tbl_series_links.setVerticalHeaderItem(i, ltbl_vheader[i])
ltbl_hheader = []
ltbl_hheader.extend(params)
self.tbl_series_links.setColumnCount(len(ltbl_hheader))
self.tbl_series_links.setHorizontalHeaderLabels(ltbl_hheader)
# create the parameter values table
vrange = range(len(ptbl_vheader)-len(series), len(ptbl_vheader))
hrange = range((len(ptbl_hheader)-len(params)), len(ptbl_hheader))
for sname, row in zip(series, vrange):
w = self.centred_tablewidget(self.df_series_spec.loc[sname, self.s_types[self.S_INCLUDE_CBOXES]])
self.tbl_param_values.setCellWidget(row, 0, w)
for pname, col in zip(params, hrange):
w = self.centred_tablewidget(self.df_params_spec.loc[pname, self.p_types[self.P_FIX_CBOXES]])
self.tbl_param_values.setCellWidget(0, col, w)
for sname, row in zip(series, vrange):
for pname, col in zip(params, hrange):
edt = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS], pname, sname]
cbx = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES], pname, sname]
w = self.checkable_edit_widget(cbx, edt)
self.tbl_param_values.setCellWidget(row, col, w)
# create the linkage table
vrange = range(len(ltbl_vheader)-len(series), len(ltbl_vheader))
hrange = range((len(ltbl_hheader)-len(params)), len(ltbl_hheader))
for pname, col in zip(params, hrange):
w = self.centred_tablewidget(self.df_params_spec.loc[pname, 'all_linked_cboxes'])
self.tbl_series_links.setCellWidget(0, col, w)
for sname, row in zip(series, vrange):
for pname, col in zip(params, hrange):
self.tbl_series_links.setCellWidget(row, col, self.pn_fit_spec.loc['series_combos', pname, sname])
self.tbl_param_values.resizeRowsToContents()
self.tbl_series_links.resizeRowsToContents()
self.on_global_changed()
def on_all_fixed_changed(self):
if self.current_state in (self.ST_READY, ):
param, col = self.find_sender_index(self.df_params_spec)
if param is not None:
checkstate = int(self.df_params_spec.loc[param, col].checkState())
self.df_params_spec.loc[param, self.p_types[self.P_ALL_FIXED]] = checkstate # synchronise with logical representation
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], param] = checkstate
self.update_param_vals_table()
def on_all_linked_changed(self):
if self.current_state in (self.ST_READY, ):
param, col = self.find_sender_index(self.df_params_spec)
if param is not None:
checkstate = self.df_params_spec.loc[param, col].checkState()
self.df_params_spec.loc[param, self.p_types[self.P_ALL_LINKED]] = checkstate # synchronise with logical representation
linkto = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param].iloc[0]
for series in self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param].index:
if checkstate == qt.Qt.Unchecked:
linkto = series
self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param, series] = linkto
self.update_linkage_table()
def on_analyze(self):
if self.current_state in (self.ST_READY, ):
try:
params = self.current_function.parameters
series = self.get_selected_series_names()
fitted_params, sigmas, confidence_intervals, tol = self.perform_fit()
df_pars = pd.DataFrame(fitted_params.transpose(), index=params, columns=series)
df_sigm = pd.DataFrame(sigmas.transpose(), index=params, columns=series)
sr_ftol = pd.Series(tol, index=series)
for pname, row in df_pars.iterrows():
for sname, val in row.iteritems():
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
for pname, row in df_sigm.iterrows():
for sname, val in row.iteritems():
self.pn_fit_spec.loc[self.ps_types[self.PS_SIGMAS], pname, sname] = val
for sname, val in sr_ftol.iteritems():
self.df_series_spec.loc[sname, self.s_types[self.S_FTOL]] = val
self.on_calculate()
self.update_controls()
self.update_param_vals_table()
self.show_selected_data()
self.show_smooth_line()
self.show_fitted_params()
except Exception as e:
print(e)
pass
def on_calculate(self):
if self.current_state in (self.ST_READY, ):
self.set_calculated_curves()
self.set_residual_curves()
self.draw_current_data_set()
pass
def on_close_data(self):
if self.current_state in (self.ST_DATA_ONLY, self.ST_READY, ):
self.current_xaxis = None
self.set_axis_selector()
self.canvas.clear_plots()
self.blits_data = BlitsData()
self.blits_fitted = BlitsData()
self.blits_residuals = BlitsData()
if self.current_state == self.ST_DATA_ONLY:
self.current_state = self.ST_START
else:
self.current_state = self.FUNCTION_ONLY
self.init_fit_spec()
self.init_ui()
self.update_controls()
pass
def on_create(self):
if self.current_state in (self.FUNCTION_ONLY, ):
self.create_data_set_dialog = DataCreationDialog(None, self.current_function)
if self.create_data_set_dialog.exec() == widgets.QDialog.Accepted:
self.blits_data = BlitsData()
self.blits_data.series_names = self.create_data_set_dialog.get_series_names()
self.blits_data.axis_names = self.create_data_set_dialog.get_axes()
self.blits_data.series_dict = self.create_data_set_dialog.get_series_dict()
df_pars = self.create_data_set_dialog.get_parameters()
self.current_state = self.ST_READY
self.current_xaxis = self.blits_data.get_axes_names()[0]
try:
self.set_axis_selector()
self.draw_current_data_set()
self.init_fit_spec()
for pname, row in df_pars.iterrows():
for sname, val in row.iteritems():
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
self.init_ui()
except Exception as e:
print(e)
self.update_controls()
self.on_select_function()
pass
pass
def on_estimate(self):
if self.current_state in (self.ST_READY, ):
fn_p0 = self.current_function.p0
params = self.current_function.parameters
series = self.get_selected_series_names()
data = self.get_data_for_fitting(series)
ffw = FunctionsFramework()
values = ffw.get_initial_param_estimates(data, fn_p0, len(params)).transpose()
df_pars = pd.DataFrame(values, index=params, columns=series)
try:
for pname, row in df_pars.iterrows():
for sname, val in row.iteritems():
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname] = val
except Exception as e:
print(e)
self.update_param_vals_table()
self.on_calculate()
pass
def on_global_changed(self):
if self.chk_global.checkState() == qt.Qt.Checked:
self.tbl_series_links.setEnabled(True)
else:
self.tbl_series_links.setEnabled(False)
def on_linkage_changed(self):
if self.current_state in (self.ST_READY, ):
df = self.pn_fit_spec.loc[self.ps_types[self.PS_COMBOS]]
param, series = self.find_sender_index(df)
if param is not None and series is not None:
link = df.loc[param, series].currentText()
self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], param, series] = link
self.rationalise_groups(param)
self.update_linkage_table()
pass
pass
def on_open(self):
if self.current_state in (self.ST_START, self.FUNCTION_ONLY, ):
file_path = widgets.QFileDialog.getOpenFileName(self,
"Open Data File", "", "CSV data files (*.csv);;All files (*.*)")[0]
if file_path:
self.blits_data.import_data(file_path)
axes = self.blits_data.get_axes_names() #cp.deepcopy(self.blits_data.get_axes_names())
self.current_xaxis = axes[0] #self.blits_data.get_axes_names()[0]
if self.current_state == self.ST_START:
self.current_state = self.ST_DATA_ONLY
else:
if len(self.current_function.independents) <= len(axes):
self.current_state = self.ST_READY
else:
self.current_function = None
self.current_state = self.ST_DATA_ONLY
self.set_axis_selector()
self.init_fit_spec()
self.init_ui()
self.update_controls()
self.on_select_function()
def on_param_fix_changed(self):
if self.current_state in (self.ST_READY, ):
param, series = None, None
df = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES]]
param, series = self.find_sender_index(df)
if param is not None and series is not None:
param, series = self.find_sender_index(df)
try:
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], param, series] = int(self.sender().checkState())
except Exception as e:
print(e)
def on_param_val_changed(self):
if self.current_state in (self.ST_READY, ):
param, series = None, None
df = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS]]
param, series = self.find_sender_index(df)
if param is not None and series is not None:
param, series = self.find_sender_index(df)
try:
self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], param, series] = float(self.sender().text())
except Exception as e:
print(e)
def on_series_selected_changed(self):
if self.current_state in (self.ST_READY, ):
series, col = None, None
series, col = self.find_sender_index(self.df_series_spec)
if series is not None:
try:
checkstate = self.df_series_spec.loc[series, col].checkState()
self.df_series_spec.loc[series, self.s_types[self.S_INCLUDED]] = int(checkstate)
# synchronise with logical representation; int is necessary to make sure Qt recognises it (won't recognise int64 (??))
except Exception as e:
print(e)
def on_save(self):
file_path = ""
if self.current_state in (self.ST_READY, ):
file_path = widgets.QFileDialog.getSaveFileName(self,
"Save all", "", "Excel files (*.xlsx);;All files (*.*)")[0]
if file_path:
smooth_lines = self.get_xs_fitted_smooth_df()
obs_fit_res = self.get_xs_obs_fit_res_df()
# pd.concat((obs_fit_res, smooth_lines), axis=1)
params = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]]
try:
writer = pd.ExcelWriter(file_path)
obs_fit_res.to_excel(writer,'Data')
smooth_lines.to_excel(writer, 'Fit')
params.to_excel(writer,'Parameters')
writer.save()
writer.close()
except Exception as e:
print(e)
def on_select_function(self):
if self.current_state in range(self.N_STATES): # should work from all states
name, n_axes = "", 0
if not self.current_state in (self.ST_START, self.ST_DATA_ONLY): # a current function exists
name = self.current_function.name
if self.current_state in (self.ST_DATA_ONLY, self.ST_READY, ):
n_axes = len(self.blits_data.get_axes_names())
self.function_dialog = FunctionSelectionDialog(self, n_axes=n_axes, selected_fn_name=name)
if self.function_dialog.exec() == widgets.QDialog.Accepted:
self.current_function = self.function_dialog.get_selected_function()
self.blits_fitted = BlitsData()
self.blits_residuals = BlitsData()
if self.current_state in (self.ST_START, self.FUNCTION_ONLY):
self.current_state = self.FUNCTION_ONLY
else:
self.current_state = self.ST_READY
self.init_fit_spec()
self.init_ui()
self.draw_current_data_set()
self.update_controls()
def on_xaxis_changed(self, checked):
if self.current_state not in (self.ST_START, self.FUNCTION_ONLY, ):
btn = self.sender()
xaxis = btn.text()
if btn.isChecked():
self.preserve_xlimits()
self.current_xaxis = xaxis
self.draw_current_data_set()
def draw_current_data_set(self):
self.canvas.clear_plots()
if self.current_state not in (self.ST_START, self.FUNCTION_ONLY, ):
if self.blits_data.has_data():
self.canvas.set_colours(self.blits_data.series_names.tolist())
for key in self.blits_data.series_names:
series = self.blits_data.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'primary')
if self.blits_fitted.has_data():
for key in self.blits_fitted.series_names:
series = self.blits_fitted.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'calculated')
if self.blits_residuals.has_data():
for key in self.blits_residuals.series_names:
series = self.blits_residuals.series_dict[key]
x = series[self.current_xaxis]
y = series[key]
self.canvas.draw_series(key, x, y, 'residuals')
if self.df_xlimits is not None:
self.canvas.set_vlines(self.df_xlimits.loc[self.current_xaxis].as_matrix())
def get_constant_params_for_fitting(self, series_names):
"""
Returns an (n_curves, n_params)-shaped array of Boolean values
(with rows and columns parallel to self.series_names and self.current_function.parameters,
        respectively) with a value for each parameter of each series; if True, the
        parameter value is held constant, if False, the parameter value is variable.
"""
selected = (self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED], :, series_names] == qt.Qt.Checked).transpose()
return selected.as_matrix()
def get_data_for_fitting(self, series_names):
data = []
self.preserve_xlimits()
start, stop = self.df_xlimits.loc[self.current_xaxis].as_matrix() # self.canvas.get_vline_positions()
for s in series_names:
series = self.blits_data.series_dict[s] # the full data set
indmin = series[self.current_xaxis].searchsorted(start, side='left')[0]
indmax = series[self.current_xaxis].searchsorted(stop, side='right')[0]
selection = cp.deepcopy(series[indmin:indmax]).as_matrix().transpose()
if len(data) == 0:
data = [selection]
else:
data.append(selection)
return data
def get_param_values_for_fitting(self, series_names):
"""
Returns an (n_curves, n_params)-shaped array (with rows and columns
parallel to self.series_names and self.current_function.parameters,
        respectively) with a value for each parameter of each series.
"""
selected = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], :, series_names]
params = selected.as_matrix().transpose()
return params
def get_selected_series_names(self):
"""
Returns a numpy array of the selected series names
"""
selected = self.df_series_spec.loc[:, self.s_types[self.S_INCLUDED]] == qt.Qt.Checked
all_series = self.df_series_spec.index.values
return all_series[selected]
def get_series_linkage_for_fitting(self, series_names):
"""
Returns an (n_curves, n_params)-shaped array (with rows and columns parallel to
self.series_names and self.current_function.parameters, respectively)
        of group labels (shown as integers below), in which linked parameters share the same label.
Example for 4 curves and 3 parameters:
p0 p1 p2
c0 0 2 3
c1 0 2 4
c2 1 2 5
c3 1 2 6
indicates that parameter p0 is assumed to have the same value in
curves c0 and c1, and in curves c2 and c3 (a different value),
and that the value for p1 is the same in all curves, whereas
the value of p2 is different for all curves.
"""
selected = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], :, series_names].transpose()
links_array = cp.deepcopy(selected)
for series, row in selected.iterrows():
for param, txt in row.iteritems():
links_array.loc[series, param] = param + "_" + txt
return links_array.as_matrix()
def perform_fit(self):
# Collect the required information
func = self.current_function.func
series_names = self.get_selected_series_names()
data = self.get_data_for_fitting(series_names)
param_values = self.get_param_values_for_fitting(series_names)
const_params = self.get_constant_params_for_fitting(series_names)
links = self.get_series_linkage_for_fitting(series_names)
# set up for the fitting procedure
fitted_params = cp.deepcopy(param_values)
sigmas = np.empty_like(fitted_params)
confidence_intervals = np.empty_like(fitted_params)
tol = None
results = None
ffw = FunctionsFramework()
# Do the fit
if self.chk_global.checkState() == qt.Qt.Checked: # Global
results = ffw.perform_global_curve_fit(data, func, param_values, const_params, links)
fitted_params = results[0]
sigmas = results[1]
confidence_intervals = results[2]
tol = results[3]
else: # not global)
tol = []
n = 0
for d, p, c, l in zip(data, param_values, const_params, links):
d = [d, ]
p = np.reshape(p, (1, p.shape[0]))
c = np.reshape(c, (1, c.shape[0]))
l = np.reshape(l, (1, l.shape[0]))
results = ffw.perform_global_curve_fit(d, func, p, c, l)
fitted_params[n] = results[0]
sigmas[n] = results[1]
confidence_intervals[n] = results[2]
tol.append(results[3])
n += 1
return fitted_params, sigmas, confidence_intervals, tol
def preserve_xlimits(self):
if self.current_state in (self.ST_READY, ):
if self.df_xlimits is not None: # its shouldn't be, but just to be sure
self.df_xlimits.loc[self.current_xaxis] = self.canvas.get_vline_positions()
else:
self.df_xlimits = None # probably superfluous as well
def set_axis_selector(self):
self.axis_selector_buttons = {}
self.clearLayout(self.axis_layout)
if self.blits_data.has_data():
self.axis_layout.addStretch()
for name in self.blits_data.get_axes_names():
btn = widgets.QRadioButton()
btn.setText(name)
btn.toggled.connect(self.on_xaxis_changed)
self.axis_layout.addWidget(btn)
self.axis_selector_buttons[btn.text()] = btn
self.axis_layout.addStretch()
if not self.current_xaxis is None:
if self.current_xaxis in self.axis_selector_buttons:
self.axis_selector_buttons[self.current_xaxis].setChecked(True)
def set_calculated_curves(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
axes = self.blits_data.get_axes_names()
series_dict = {}
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x_all = data[i][:-1]
x = np.zeros((x_all.shape[0], self.nfitted_points))
for i in range(x_all.shape[0]):
start, stop = x_all[i][0], x_all[i][-1]
x[i] = np.linspace(start, stop, self.nfitted_points)
y_fit = np.atleast_2d(self.current_function.func(x, series_params))
# create the y values and put them in a DataFrame, transpose for easy concatenation
df_x = pd.DataFrame(x, index=axes)
df_y = pd.DataFrame(y_fit, index=[series_name])
df_data = pd.concat((df_x, df_y)).transpose()
series_dict[series_name] = df_data
self.blits_fitted = BlitsData()
self.blits_fitted.series_names= np.array(selected_series)
self.blits_fitted.axis_names = cp.deepcopy(axes)
self.blits_fitted.series_dict = series_dict
def set_residual_curves(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
axes = self.blits_data.get_axes_names()
series_dict = {}
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x = data[i][:-1]
y_obs = data[i][-1]
y_fit = self.current_function.func(x, series_params)
y_res = np.atleast_2d(y_obs - y_fit)
# create the y values and put them in a DataFrame, transpose for easy concatenation
df_x = pd.DataFrame(x, index=axes)
df_y = pd.DataFrame(y_res, index=[series_name])
df_data = pd.concat((df_x, df_y)).transpose()
series_dict[series_name] = df_data
self.blits_residuals = BlitsData()
self.blits_residuals.series_names = np.array(selected_series)
self.blits_residuals.axis_names = cp.deepcopy(axes)
self.blits_residuals.series_dict = series_dict
def get_xs_obs_fit_res_df(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
daxes = self.blits_data.get_axes_names()
faxes = self.current_function.independents
axes = np.array([f + "\n(" + a + ")" for a, f in zip(daxes, faxes)])
df_data = None
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x = data[i][:-1]
y_obs = np.atleast_2d(data[i][-1])
y_fit = np.atleast_2d(self.current_function.func(x, series_params))
y_res = np.atleast_2d(y_obs - y_fit)
df_x = pd.DataFrame(x, index=axes) # no series name, get confusing
df_y_obs = pd.DataFrame(y_obs, index=[' y-obs \n(' + series_name + ')' ])
df_y_fit = pd.DataFrame(y_fit, index=[' y-fit\n(' + series_name + ')'])
df_y_res = pd.DataFrame(y_res, index=[' y-res\n(' + series_name + ')'])
df_data = pd.concat((df_data, df_x, df_y_obs, df_y_fit, df_y_res))
return df_data.transpose()
def get_xs_fitted_smooth_df(self):
selected_series = self.get_selected_series_names()
params = self.get_param_values_for_fitting(selected_series)
data = self.get_data_for_fitting(selected_series)
daxes = self.blits_data.get_axes_names()
faxes = self.current_function.independents
axes = np.array([f + "\n(" + a + ")" for a, f in zip(daxes, faxes)])
df_data = None
for series_name, series_params, i in zip(selected_series, params, range(len(selected_series))):
x0 = data[i][:-1, 0]
x1 = data[i][:-1, -1]
x = np.empty((len(axes), self.nfitted_points))
for i, i0, i1 in zip(range(len(axes)), x0, x1):
x[i] = np.linspace(i0, i1, self.nfitted_points, dtype=float)
y_fit = np.atleast_2d(self.current_function.func(x, series_params))
df_x = pd.DataFrame(x, index=axes) # no series name, get confusing
df_y_fit = pd.DataFrame(y_fit, index=[' y-fit\n(' + series_name + ')'])
df_data = pd.concat((df_data, df_x, df_y_fit))
return df_data.transpose()
def show_selected_data(self):
self.tbl_fitted_data.clear()
self.tbl_fitted_data.setColumnCount(0)
self.tbl_fitted_data.setRowCount(0)
all_data = self.get_xs_obs_fit_res_df()
self.tbl_fitted_data.setRowCount(all_data.shape[0])
self.tbl_fitted_data.setColumnCount(all_data.shape[1])
self.tbl_fitted_data.setHorizontalHeaderLabels(all_data.columns.values)
for i in range(self.tbl_fitted_data.rowCount()):
for j in range(self.tbl_fitted_data.columnCount()):
w = widgets.QTableWidgetItem()
txt = ""
if not np.isnan(all_data.iloc[i, j]):
txt = "{:8.3g}".format(all_data.iloc[i, j])
w.setText(txt)
self.tbl_fitted_data.setItem(i, j, w)
self.tbl_fitted_data.resizeColumnsToContents()
def show_smooth_line(self):
self.tbl_smooth_line.clear()
self.tbl_smooth_line.setColumnCount(0)
self.tbl_smooth_line.setRowCount(0)
all_data = self.get_xs_fitted_smooth_df()
self.tbl_smooth_line.setRowCount(all_data.shape[0])
self.tbl_smooth_line.setColumnCount(all_data.shape[1])
self.tbl_smooth_line.setHorizontalHeaderLabels(all_data.columns.values)
for i in range(self.tbl_smooth_line.rowCount()):
for j in range(self.tbl_smooth_line.columnCount()):
w = widgets.QTableWidgetItem()
txt = ""
if not np.isnan(all_data.iloc[i, j]):
txt = "{:8.3g}".format(all_data.iloc[i, j])
w.setText(txt)
self.tbl_smooth_line.setItem(i, j, w)
self.tbl_smooth_line.resizeColumnsToContents()
def show_fitted_params(self):
self.tbl_fitted_params.clear()
self.tbl_fitted_params.setColumnCount(0)
self.tbl_fitted_params.setRowCount(0)
pnames = self.pn_fit_spec.major_axis.values
pheader = np.vstack((pnames, np.array(["Stderr\non " + pname for pname in pnames]))).transpose().ravel()
pheader = np.hstack((pheader, np.array(["ftol"])))
sheader = self.pn_fit_spec.minor_axis.values
self.tbl_fitted_params.setColumnCount(len(pheader))
self.tbl_fitted_params.setHorizontalHeaderLabels(pheader)
self.tbl_fitted_params.setRowCount(len(sheader))
self.tbl_fitted_params.setVerticalHeaderLabels(sheader)
irow = -1
for sname in self.pn_fit_spec.minor_axis.values:
irow += 1
icol = -1
for pname in self.pn_fit_spec.major_axis.values:
pval = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES], pname, sname]
perr = self.pn_fit_spec.loc[self.ps_types[self.PS_SIGMAS], pname, sname]
spval, sperr = "", ""
if not np.isnan(pval):
spval = '{:8.3g}'.format(pval)
if not np.isnan(perr):
sperr = '{:8.3g}'.format(perr)
icol += 1
wi = widgets.QTableWidgetItem(spval)
self.tbl_fitted_params.setItem(irow, icol, wi)
icol += 1
wi = widgets.QTableWidgetItem(sperr)
self.tbl_fitted_params.setItem(irow, icol, wi)
icol += 1
ftol = self.df_series_spec.loc[sname, self.s_types[self.S_FTOL]]
sftol = ""
if not np.isnan(ftol):
sftol = '{:8.3g}'.format(ftol)
wi = widgets.QTableWidgetItem(sftol)
self.tbl_fitted_params.setItem(irow, icol, wi)
self.tbl_fitted_params.resizeColumnsToContents()
def rationalise_groups(self, parameter):
if self.current_state in (self.ST_READY, ) and parameter != '':
prow = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], parameter]
x = prow.index
df_wf = pd.DataFrame(np.zeros((len(x), len(x))), index=x, columns=x, dtype=bool) # set up the matrix
for series, val in prow.iteritems():
df_wf.loc[series, series] = True # make the matrix reflexive
if series != val:
df_wf.loc[series, val] = True
df_wf.loc[val, series] = True # make the matrix symmetrical
# make matrix transitive (Warshall-Floyd)
for k in range(len(x)):
for i in range(len(x)):
for j in range(len(x)):
df_wf.iloc[i, j] = df_wf.iloc[i, j] or (df_wf.iloc[i, k] == 1 and df_wf.iloc[k, j] == 1)
# Find the equivalence classes for this parameter
seen = []
sr_equiv_clss = pd.Series(index=x)
for series0, row in df_wf.iterrows():
for series1, val in row.iteritems():
if val:
if series1 not in seen:
sr_equiv_clss.loc[series1] = series0
seen.append(series1)
for series in x:
self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS], parameter, series] = sr_equiv_clss.loc[series]
pass
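    # Note on the linkage bookkeeping above: rationalise_groups makes the
    # per-parameter link choices transitive.  If, say, series s1 is linked to s2
    # and s2 to s3 for parameter 'k' (hypothetical names), all three end up in one
    # equivalence class with a single representative label, so a global fit treats
    # 'k' as one shared parameter across those series.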
def update_controls(self):
"""
Enables and disables controls for each state
"""
if self.current_state == self.ST_START:
self.action_open.setEnabled(True)
self.action_create.setEnabled(False)
self.action_close.setEnabled(False)
self.action_save.setEnabled(False)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.ST_DATA_ONLY:
self.action_open.setEnabled(False)
self.action_create.setEnabled(False)
self.action_close.setEnabled(True)
self.action_save.setEnabled(True)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.FUNCTION_ONLY:
self.action_open.setEnabled(True)
self.action_create.setEnabled(True)
self.action_close.setEnabled(False)
self.action_save.setEnabled(False)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(False)
self.action_estimate.setEnabled(False)
self.action_apply.setEnabled(False)
self.btn_apply.setEnabled(False)
self.btn_fit.setEnabled(False)
self.btn_est.setEnabled(False)
self.action_quit.setEnabled(True)
elif self.current_state == self.ST_READY:
self.action_open.setEnabled(False)
self.action_create.setEnabled(False)
self.action_close.setEnabled(True)
self.action_save.setEnabled(True)
self.action_select_function.setEnabled(True)
self.action_analyze.setEnabled(True)
self.action_estimate.setEnabled(True)
self.action_apply.setEnabled(True)
self.btn_apply.setEnabled(True)
self.btn_fit.setEnabled(True)
self.btn_est.setEnabled(True)
self.action_quit.setEnabled(True)
else:
print('Illegal state')
def update_linkage_table(self):
"""
Sets combo-boxes in linkage_combos to the current values in linkage_groups
"""
if self.current_state in (self.ST_READY, ):
combos = self.pn_fit_spec.loc[self.ps_types[self.PS_COMBOS]]
vals = self.pn_fit_spec.loc[self.ps_types[self.PS_GROUPS]]
try:
for i, row in vals.iterrows():
for j, val in row.iteritems():
box = combos.loc[i, j]
if box.currentText() != val:
box.currentIndexChanged.disconnect()
box.setCurrentText(val)
box.currentIndexChanged.connect(self.on_linkage_changed)
except Exception as e:
print(e)
def update_param_vals_table(self):
"""
Sets text and checkstate of values table items to their corresponding
logical values in pn_fit_spec
"""
if self.current_state in (self.ST_READY, ):
edts = self.pn_fit_spec.loc[self.ps_types[self.PS_LEDITS]]
cbxs = self.pn_fit_spec.loc[self.ps_types[self.PS_FIX_CBOXES]]
vals = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUES]]
chks = self.pn_fit_spec.loc[self.ps_types[self.PS_VALUE_FIXED]]
try:
for i, row in vals.iterrows():
for j, val in row.iteritems():
edt = edts.loc[i, j]
cbx = cbxs.loc[i, j]
checkstate = chks.loc[i, j]
if float(edt.text()) != val:
edt.textChanged.disconnect()
edt.setText('{:.3g}'.format(val))
edt.textChanged.connect(self.on_param_val_changed)
if cbx.checkState() != checkstate:
cbx.stateChanged.disconnect()
cbx.setCheckState(qt.Qt.Unchecked)
if checkstate == qt.Qt.Checked:
cbx.setCheckState(qt.Qt.Checked)
cbx.stateChanged.connect(self.on_param_fix_changed)
except Exception as e:
print(e)
def write_param_values_to_table(self, param_values):
pass
#self.parameters_model.change_content(param_values.transpose())
#self.parameters_model.df_data[:] = param_values.transpose()
#self.tbl_params.resizeColumnsToContents() # This redraws the table (necessary)
### Convenience functions and procedures
def circle_icon(self, color):
pix = gui.QPixmap(30,30)
pix.fill(gui.QColor("transparent"))
paint = gui.QPainter()
paint.begin(pix)
paint.setBrush(gui.QColor(color))
paint.setPen(gui.QColor("transparent"))
paint.drawEllipse(0,0,30,30)
paint.end()
icon = gui.QIcon(pix)
return icon
def clearLayout(self, layout):
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
self.clearLayout(child.layout())
def find_sender_index(self, dataframe):
sender_i, sender_j = None, None
for i, row, in dataframe.iterrows():
for j, item in row.iteritems():
if item is self.sender():
sender_i = i
sender_j = j
return sender_i, sender_j
def centred_tablewidget(self, qtwidget):
wid = widgets.QWidget()
hlo = widgets.QVBoxLayout()
hlo.setContentsMargins(12, 0, 12, 0)
hlo.setAlignment(qt.Qt.AlignCenter)
wid.setLayout(hlo)
hlo.addWidget(qtwidget)
return wid
def checkable_edit_widget(self, checkbox, textbox):
wid = widgets.QWidget()
hlo = widgets.QHBoxLayout()
hlo.setContentsMargins(12, 0, 12, 0)
wid.setLayout(hlo)
hlo.addWidget(textbox)
hlo.addStretch()
hlo.addWidget(checkbox)
return wid
def is_number(self, s):
try:
float(s)
return True
except ValueError:
return False
def line_icon(self, color):
pixmap = gui.QPixmap(50,10)
pixmap.fill(gui.QColor(color))
icon = gui.QIcon(pixmap)
return icon
# Standard main loop code
if __name__ == '__main__':
import sys
app = widgets.QApplication(sys.argv)
main = Main()
main.show()
sys.exit(app.exec_())
| gpl-3.0 |
stanmoore1/lammps | python/examples/matplotlib_plot.py | 4 | 2246 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# matplotlib_plot.py
# Purpose: plot Temp of running LAMMPS simulation via matplotlib
# Syntax: plot.py in.lammps Nfreq Nsteps compute-ID
# in.lammps = LAMMPS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
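# Example invocation ('in.melt' is a standard LAMMPS example input and
# 'thermo_temp' is the temperature compute LAMMPS defines by default):
# python matplotlib_plot.py in.melt 100 1000 thermo_temp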
from __future__ import print_function
import sys
import matplotlib.pyplot as plt
# parse command line
argv = sys.argv
if len(argv) != 5:
print("Syntax: plot.py in.lammps Nfreq Nsteps compute-ID")
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment this if running in parallel via mpi4py
#from mpi4py import MPI
#me = MPI.COMM_WORLD.Get_rank()
#nprocs = MPI.COMM_WORLD.Get_size()
from lammps import lammps
lmp = lammps()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# create matplotlib plot
# just proc 0 handles plotting
if me == 0:
fig = plt.figure()
line, = plt.plot(xaxis, yaxis)
plt.xlim([0, nsteps])
plt.title(compute)
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show(block=False)
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
import time
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0:
line.set_xdata(xaxis)
line.set_ydata(yaxis)
ax = plt.gca()
ax.relim()
ax.autoscale_view(True, True, True)
plt.pause(0.001)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via mpi4py
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
if me == 0:
if sys.version_info[0] == 3:
input("Press Enter to exit...")
else:
raw_input("Press Enter to exit...")
| gpl-2.0 |
Suranjandas7/SketchPad | RedditComments/redditcomments.py | 1 | 6472 | import sqlite3
import praw
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import configparser
class work():
def __init__(self, limitP, limitC, database_name, r_name, c_type):
self.title = ''
self.list_of_comments = []
self.limitP = limitP
self.limitC = limitC
self.post_id = ''
self.tags = ''
self.database_name = str(database_name)
self.r_name = str(r_name)
self.c_type = str(c_type)
def data_count(self):
def control(mode):
def shortcut(data):
container = []
for lines in data:
container.append(lines)
return len(container)
conn = sqlite3.connect(self.database_name)
c = conn.cursor()
data = c.execute("SELECT DISTINCT * from {}".format(mode))
count = shortcut(data)
c.close()
conn.close()
return count
no_of_comments = control('Comments')
no_of_posts = control('Posts')
print 'No of Posts - {}\nNo of comments - {}'.format(
no_of_posts,
no_of_comments)
def make_wordcloud(self, w, h):
conn = sqlite3.connect(self.database_name)
c = conn.cursor()
lines = c.execute("SELECT DISTINCT * from Comments")
all_text = []
s = ''
for l in lines:
all_text.append(l[0].encode('utf-8'))
for at in all_text:
s = s+at
wordcloud = WordCloud(
width=w,
height=h,
).generate(s)
c.close()
conn.close()
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
def read(self):
f = open('output.txt', 'w')
conn = sqlite3.connect(self.database_name)
c = conn.cursor()
post_dict = {}
list_of_unique_posts = c.execute(
"SELECT DISTINCT Title, PostID from Posts"
)
for post in list_of_unique_posts:
post_dict[post[1]] = post[0]
for key in post_dict:
current_parent_id = (key,)
f.write('\n\n[Title : {}]\n\n'.format(post_dict[key].encode('utf-8')))
list_of_unique_comments = c.execute(
"SELECT DISTINCT * from Comments WHERE ParentId=?",
current_parent_id
)
for comment in list_of_unique_comments:
f.write('\n{}'.format(comment[0].encode('utf-8')))
f.write('\n---END COMMENT---\n')
f.close()
c.close()
conn.close()
def create_database(self):
conn = sqlite3.connect(str(self.database_name)+'.db')
c = conn.cursor()
c.execute('''CREATE TABLE Posts
(Title text, PostId text)'''
)
c.execute('''CREATE TABLE Comments
(Content text, ParentId text, CommentId text)'''
)
conn.commit()
c.close()
conn.close()
def process(self):
def addtodb(post_title, post_id, list_of_comments):
conn = sqlite3.connect(self.database_name)
c = conn.cursor()
post_insert = (
post_title,
post_id,
)
c.execute("INSERT INTO Posts VALUES (?,?)", post_insert)
for comment in list_of_comments:
comment_insert = (
comment[0],
post_id,
comment[1],
)
c.execute("INSERT INTO comments VALUES (?,?,?)", comment_insert)
conn.commit()
c.close()
conn.close()
reddit = praw.Reddit('bot1')
subreddit = reddit.subreddit(self.r_name)
counter = 0
for s in subreddit.hot(limit=self.limitP):
submission = s
submission.comment_sort = self.c_type
self.title = submission.title
self.post_id = submission.id
comments = s.comments
i=0
for comment in comments:
w_o_a = comment.body
comment_id = comment.id
if w_o_a == '[deleted]' or w_o_a == '[removed]':
continue
else:
output = [w_o_a, comment_id]
self.list_of_comments.append(output)
i+=1
if i == self.limitC:
break
#display(self.title, self.list_of_comments, self.post_id)
counter +=1
print '[Writing to database]\t {} out of {}'.format(
str(counter),
str(self.limitP))
addtodb(self.title, self.post_id, self.list_of_comments)
self.title = ''
self.list_of_comments = []
self.tags = ''
def main():
reader = configparser.ConfigParser()
reader.read('config.ini')
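    # Sketch of the config.ini layout this script assumes (the section name is
    # the database name typed at the prompt; the values below are made up):
    #
    #   [mydata.db]
    #   subreddit = learnpython
    #   comments = top
    #
    # praw.Reddit('bot1') additionally expects a 'bot1' site section in praw.ini.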
flag = False
while flag==False:
choice = str(raw_input('''
1 - Read
2 - Catch
3 - WordCloud
4 - DataCount
5 - Create Database
'Exit' - Exit
Enter Your Choice :
'''))
if choice == '1':
name_of_db = str(raw_input('Enter name of db : '))
sd = work(0,0, name_of_db, 'NA', 'NA')
sd.read()
elif choice == '2':
name_of_db = str(raw_input('Enter name of db : '))
subreddit_name = reader.get(name_of_db, 'subreddit')
comment_type = reader.get(name_of_db, 'comments')
sd = work(15,45, name_of_db, subreddit_name, comment_type)
sd.process()
elif choice == '3':
name_of_db = str(raw_input('Enter name of db : '))
sd = work(0,0, name_of_db, 'NA', 'NA')
sd.make_wordcloud(1920,1080)
elif choice == '4':
name_of_db = str(raw_input('Enter name of db : '))
sd = work(0,0, name_of_db, 'NA', 'NA')
sd.data_count()
elif choice == '5':
name_of_db = str(raw_input('Enter name of database: '))
sd = work(0,0,name_of_db, 'NA', 'NA')
sd.create_database()
elif choice == 'Exit':
flag = True
else:
            print 'INVALID OPTION'
if __name__ == '__main__':
main()
| lgpl-3.0 |
kapil-malik/airflow | airflow/hooks/hive_hooks.py | 11 | 15954 | from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import csv
import logging
import re
import subprocess
from tempfile import NamedTemporaryFile
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
import pyhs2
from airflow.utils import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils import TemporaryDirectory
class HiveCliHook(BaseHook):
"""
Simple wrapper around the hive CLI.
    It also supports the ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default"):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.conn = conn
def run_cli(self, hql, schema=None, verbose=True):
"""
Run an hql statement using the hive cli
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql)
f.flush()
fname = f.name
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
jdbc_url = (
"jdbc:hive2://"
"{0}:{1}/{2}"
";auth=noSasl"
).format(conn.host, conn.port, conn.schema)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
                        cmd_extra += ['-p', conn.password]
hive_cmd = [hive_bin, '-f', fname] + cmd_extra
if self.hive_cli_params:
hive_params_list = self.hive_cli_params.split()
hive_cmd.extend(hive_params_list)
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, ''):
stdout += line
if verbose:
logging.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
error_loc = re.search('(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
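
        A minimal sketch (the file path and column types are placeholders):

        >>> hh = HiveCliHook()
        >>> hh.load_file("/tmp/baby_names.csv", "airflow.static_babynames",
        ...              field_dict={"state": "STRING", "name": "STRING"},
        ...              delimiter=",") # doctest: +SKIP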
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.kill()
class HiveMetastoreHook(BaseHook):
'''
Wrapper to interact with the Hive Metastore
'''
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
'''
Returns a Hive thrift client.
'''
ms = self.metastore_conn
transport = TSocket.TSocket(ms.host, ms.port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
'''
Checks whether a partition exists
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
'''
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def get_table(self, table_name, db='default'):
'''
Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
'''
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
'''
        Get metastore table objects that match the pattern
'''
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
'''
        Get metastore databases that match the pattern
'''
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
'''
Returns a list of all partitions in a table. Works only
for tables with less than 32767 (java short max val).
        For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
'''
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
'''
Returns the maximum value for all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
        tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
'''
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
class HiveServer2Hook(BaseHook):
'''
Wrapper around the pyhs2 library
Note that the default authMechanism is NOSASL, to override it you
can specify it in the ``extra`` of your connection in the UI as in
``{"authMechanism": "PLAIN"}``. Refer to the pyhs2 for more details.
'''
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
return pyhs2.connect(
host=db.host,
port=db.port,
authMechanism=db.extra_dejson.get('authMechanism', 'NOSASL'),
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
for statement in hql:
with conn.cursor() as cur:
cur.execute(statement)
records = cur.fetchall()
if records:
results = {
'data': records,
'header': cur.getSchema(),
}
return results
def to_csv(self, hql, csv_filepath, schema='default'):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.getSchema()
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow([c['columnName'] for c in cur.getSchema()])
i = 0
while cur.hasMoreRows:
rows = [row for row in cur.fetchmany() if row]
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
'''
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
'''
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
'''
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
'''
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c['columnName'] for c in res['header']]
return df
| apache-2.0 |
blakeboswell/valence | setup.py | 1 | 1919 | #!/usr/bin/env python
import os
from setuptools import setup
from pyvalence import __version__
PKG_NAME = 'pyvalence'
PKG_AUTHOR = 'Audere Labs'
PKG_LICENSE = 'BSD'
AUTHOR_EMAIL = '[email protected]'
MAINTAINER_EMAIL = '' # google group / forum
URL = 'https://audere.github.com/valence'
DOWNLOAD_URL = ''
DESCRIPTION = 'Package for processing analytical chemistry data.'
# Get the long description from the README file
LONG_DESCRIPTION = 'pyvalence is a Python package for processing data generated by analytical chemistry. pyvalence reads analytical data from native formats into readily accessible pandas DataFrames and supports common analysis techniques (e.g. standard-curve regression and utilization) to reduce manual, one-off data processing. Analysis conducted with pyvalence allows researchers to spend less time processing data and more time interpreting results.'
options = {
'version': __version__,
'name': PKG_NAME,
'author': PKG_AUTHOR,
'license': PKG_LICENSE,
'author_email': AUTHOR_EMAIL,
'maintainer_email': MAINTAINER_EMAIL,
'url': URL,
'download_url': DOWNLOAD_URL,
'description': DESCRIPTION,
'long_description': LONG_DESCRIPTION,
'platforms': ['Any'],
'classifiers': [
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Chemistry'
],
'packages': [
'pyvalence', 'pyvalence.build', 'pyvalence.analyze'
],
'package_data': {'pyvalence': []},
'include_package_data': True,
'install_requires': ['numpy', 'scipy', 'pandas']
}
setup(**options)
| bsd-3-clause |
JeffAbrahamson/UNA_compta | ffa_qif.py | 1 | 4576 | #!/usr/bin/python3
"""Convert FFA downloads (CSV) to QIF for import to gnucash.
"""
import argparse
import dateutil.parser as dp
import pandas as pd
import sys
import base64
def piece_comptable(row):
"""Create the accounting ID in the format I want.
"""
xact_date_raw = row['DATE ECR']
# Remove time part of date, everything after the first space; also
# remove '/'.
xact_date = xact_date_raw[:xact_date_raw.find(' ')].replace('/','')
xact_id = base64.urlsafe_b64encode(xact_date.encode('ascii')).decode()
return 'FFA-{date}-{id}'.format(date=xact_date, id=xact_id)
def entry_remark(row):
"""Create the value for the entry's remark field.
"""
this_fullname = row['LIBELLE']
return '{d})'.format(d=this_fullname)
def get_data(infile):
"""Read dataframe from CSV file and return view.
"""
data = pd.read_csv(
infile,
sep=';',
)
data['amount'] = pd.Series(
[float(s.replace(',', '.'))
for s
in data['DEBIT']])
data['transaction_date_yyyy_mm_dd'] = pd.Series(
[dp.parse(val, dayfirst=True)
for val
in data['DATE ECR']])
data['transaction_date_qif'] = pd.Series([
'{d:0>2d}/{m:0>2d}/{y:0>4d}'.format(y=val.year, m=val.month, d=val.day)
for val
in data.transaction_date_yyyy_mm_dd])
data['transaction_yyyymmdd'] = pd.Series([
'{y:0>4d}{m:0>2d}{d:0>2d}'.format(y=val.year, m=val.month, d=val.day)
for val
in data.transaction_date_yyyy_mm_dd])
data['description'] = pd.Series(
[val.strip()
for val
in data.LIBELLE])
data['type'] = pd.Series(
[val.strip()
for val
in data['TYPE ECR']])
data['piece-comptable'] = data.apply(piece_comptable, axis=1)
data_view = data[['transaction_date_yyyy_mm_dd', 'transaction_date_qif',
'description', 'type',
'piece-comptable', 'amount']]
return data_view.sort_values(by=['transaction_date_yyyy_mm_dd'])
def make_qif(data_view):
"""Build qif file from dataframe.
The dataframe should have columns as provided by get_data(), above.
We want a qif so that we can construct splits.
Cf. https://en.wikipedia.org/wiki/Quicken_Interchange_Format
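
    For reference, one generated transaction block (it follows the !Account /
    !Type:Bank header emitted below) looks roughly like this; all values are
    illustrative, not real data:

        D02/01/2019
        T-150.0
        NFFA-02012019-<base64 of the date>
        PLICENCE CLUB X
        MPRELEVEMENT
        S6075_FFA licences
        $150.0
        E
        ^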
"""
qif_data_view = data_view[['transaction_date_qif', 'piece-comptable',
'description', 'type', 'amount']]
qif = '!Account\n'
qif += 'N401_FFA licences\n'
qif += '^\n'
qif += '!Type:Bank\n'
def qif_entry(row):
"""Create a single QIF file entry.
"""
this_transaction_date = row['transaction_date_qif']
this_piece_comptable = row['piece-comptable']
this_description = row['description']
this_type = row['type']
this_amount = -row['amount']
# D is the date. It may be required to be in English "dd mmmm
# yyyy" format.
# T is the amount of the transaction.
# N is the id number (pièce comptable).
# P is the payee (which quicken thinks of as the comment, not the account)
# M is a memo
entry = 'D{date}\nT{total}\nN{pc}\nP{payee}\nM{memo}\n'.format(
date=this_transaction_date,
total=this_amount,
pc=this_piece_comptable, payee=this_description,
memo=this_type)
# S is the split category (account number on split line).
# $ is the amount of the split entry.
# E is the split memo.
split_line = 'S{cpty}\n${amt}\nE{memo}\n'
entry += split_line.format(
cpty='6075_FFA licences',
amt=-this_amount,
memo='')
return entry
rows = qif_data_view.to_dict('records')
transactions = []
for row in rows:
if row['amount'] != 0:
transactions.append(qif_entry(row))
qif += '\n^\n'.join(transactions) + '\n^\n'
return qif
def main():
"""Do what we do.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--infile', type=str, required=True,
help='Name of file to read')
parser.add_argument('--outfile', type=str, required=False,
help='Name of file to write')
args = parser.parse_args()
data_view = get_data(args.infile)
qif = make_qif(data_view)
if args.outfile:
with open(args.outfile, 'w') as f_ptr:
f_ptr.write(qif)
else:
print(qif)
return 0
if __name__ == '__main__':
retval = main()
sys.exit(retval)
| gpl-3.0 |
zduputel/wphase | bin/make_cwp.py | 1 | 7164 | #!/usr/bin/env python
# *-* coding: iso-8859-1 *-*
############################################################################
#
# W phase source inversion package
# -------------
#
# Main authors: Zacharie Duputel, Luis Rivera and Hiroo Kanamori
#
# (c) California Institute of Technology and Universite de Strasbourg / CNRS
# April 2013
#
# Neither the name of the California Institute of Technology (Caltech)
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
# CONCATENATED W PHASE TRACES
from Arguments import *
#Customizing matplotlib
import matplotlib
matplotlib.use('PDF')
# Import external modules
import os,sys,re
import getopt as go
import numpy as np
import matplotlib.pyplot as plt
# Import internal modules
import sacpy
# Internal functions
def usage(cmd):
print('usage: %s [chan1 chan2 (default: LHZ LHN LHE LH1 LH2)] [option] (for help see %s -h)'%(cmd,cmd))
# All done
return;
def disphelp(cmd):
print('Make CWP traces\n')
usage(cmd)
print('\nAll parameters are optional:')
print('\n -i, --ifort15 input fort.15 file (e.g. fort.15, ts_fort.15, xy_fort.15)')
print('\n -n, --noref no reference solution')
print('\n -h, --help display this help and exit')
print('\nReport bugs to: <[email protected]>')
# All done
return;
def main(argv):
# Input parameters
try:
opts, args = go.gnu_getopt(argv[1:],'i:nh',["ifort15=","noref","help"])
    except go.GetoptError as err:
raise
o_wpfile = O_WPINVERSION
predfile = ''
isref = 1
CHAN = ['LHZ', 'LHN', 'LHE', 'LH1', 'LH2']
for o, a in opts:
if o == '-h' or o == '--help':
disphelp(sys.argv[0])
sys.exit(0)
if o == '-i' or o == '--ifort15':
predfile = a
if o == '-n' or o == '--noref':
isref = 0
if len(args):
CHAN = args
if not os.path.exists(o_wpfile):
sys.stderr.write('Error: file %s not available\n'%(o_wpfile))
if len(predfile) and not os.path.exists(predfile):
raise IOError('No fort.15 file named %s\n'%(predfile))
if not len(predfile):
predfile = 'xy_fort.15'
if not os.path.exists(predfile):
predfile = 'ts_fort.15'
if not os.path.exists(predfile):
predfile = 'fort.15'
if not os.path.exists(predfile):
raise IOError('No fort.15 file found\n')
sys.stdout.write('Input fort.15 file: %s\n'%predfile)
count = 0
sys.stdout.write('Input channels are: ')
for chan in CHAN:
if not os.path.exists('%s_%s'%(predfile,chan)):
continue
else:
count += 1
sys.stdout.write('%5s'%chan)
if not count:
ErrMsg = 'Error: No fort.15_ file for'
for chan in CHAN:
ErrMsg += '%5s'%(chan)
raise IOError(ErrMsg)
sys.stdout.write('\nRead %s ...\n%s pages:\n'%(o_wpfile,count))
# Main loop
sac = sacpy.sac()
L = open(o_wpfile).readlines()
ppW = matplotlib.backends.backend_pdf.PdfPages('CWP_W.pdf')
sys.stdout.write('CWP_W.pdf\n')
if isref:
ppR = matplotlib.backends.backend_pdf.PdfPages('CWP_R.pdf')
sys.stdout.write('CWP_R.pdf')
for chan in CHAN:
cb = 0.0
stat_label = []
stat_posit = []
# Read o_wpinversion
for l in L:
items = l.strip().split()
sac.rsac(items[0])
if sac.kcmpnm[2] != chan[2]:
continue
stat_label.append(sac.kstnm)
i1 = int(items[3])
i2 = int(items[4])
npts = float(i2-i1)
stat_posit.append(cb+npts/2.0)
cb += npts
if not len(stat_label):
sys.stderr.write('WARNING: No channel %s in %s\n'%(chan,o_wpfile))
continue
# Read predfile
ifile = predfile+'_'+chan
L2 = open(ifile).readlines()
ncol = len(L2[0].strip().split())
if ncol < 3 and isref:
print('Warning No ref solution in %s'%ifile)
isref = 0
else:
Wref = []
Wdat = []
Wsyn = []
for l in L2:
items = l.strip().split()
Wdat.append(float(items[0])*1000.0)
Wsyn.append(float(items[1])*1000.0)
if isref:
if len(items)<3:
raise IOError('ERROR: error reading %s\n'%(ifile))
Wref.append(float(items[2])*1000.0)
t = np.arange(0,len(Wdat),dtype='float')
# Display
fig=plt.figure(figsize=CWP_FIGSIZE)
fig.subplots_adjust(left=0.08,bottom=0.12,right=0.96,top=0.88,wspace=0.2,hspace=0.2)
plt.plot(t,Wdat,'k')
plt.plot(t,Wsyn,'r')
ymin = 1.1*min(Wdat)
ymax = 1.1*max(Wdat)
for stnm,x in zip(stat_label,stat_posit):
plt.text(x,ymax*0.6,stnm,rotation=90,fontsize=16,fontstyle='italic')
plt.ylim([ymin,ymax])
plt.xlim([0,t[-1]])
plt.xlabel('time, sec')
plt.ylabel('displacement, mm')
plt.title('Data fit, W Phase solution, %s'%chan[2])
ppW.savefig(papertype='a4',orientation='landscape')
plt.close()
if isref:
fig = plt.figure(figsize=CWP_FIGSIZE)
fig.subplots_adjust(left=0.08,bottom=0.12,right=0.96,top=0.88,wspace=0.2,hspace=0.2)
plt.plot(t,Wdat,'k')
plt.plot(t,Wref,'r')
ymin = 1.1*min(Wdat)
ymax = 1.1*max(Wdat)
for stnm,x in zip(stat_label,stat_posit):
plt.text(x,ymax*0.6,stnm,rotation=90,fontsize=16,fontstyle='italic')
plt.ylim([ymin,ymax])
plt.xlim([0,t[-1]])
plt.xlabel('time, sec')
plt.ylabel('displacement, mm')
plt.title('Data fit, Reference solution, %s'%chan[2])
ppR.savefig(papertype='a4',orientation='landscape')
plt.close()
sys.stdout.write('\n')
ppW.close()
if isref:
ppR.close()
if __name__=='__main__':
main(sys.argv)
| gpl-3.0 |
xavierwu/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
spacepajamas/DiaTop | Mapping/Mapping_1_get_document_topic_distribution.py | 1 | 1554 | # coding: utf-8
import pickle
import gensim
import sys
import pandas as pd
import datetime
mylist = []
today = datetime.date.today()
mylist.append(today)
date = str(mylist[0]) # print the date object, not the container ;-)
lda_corpus = sys.argv[1]
lda_model_name = sys.argv[2]
metadatafilename = sys.argv[3]
corpus = gensim.corpora.MmCorpus(lda_corpus)
model = gensim.models.ldamulticore.LdaMulticore.load(lda_model_name, mmap='r' )
print 'Get document topic document topic distribution from LDA model'
doc_topic_dist = {}
for i in range(len(corpus)):
print len(corpus) - i, 'left'
doc_topic_dist.update({i:model.get_document_topics(corpus[i])})
outpickelfilename = 'document_topic_distribution'+date+'.pkl'
pickle.dump(doc_topic_dist, open(outpickelfilename, 'wb'))
print 'done'
print lda_model_name.split('_')
print lda_model_name
topics = int(lda_model_name.split('_')[4])
metadata = pd.read_csv(metadatafilename)
outer_matrix = [0]*len(doc_topic_dist)
#print outer_matrix
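# The loop below densifies the sparse (topic_id, probability) pairs into one
# row per document. Illustrative example with made-up numbers and 4 topics:
#   doc_topic_dist[i] == [(0, 0.25), (2, 0.75)]  ->  inner_list == [0.25, 0, 0.75, 0]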
for i in range(len(doc_topic_dist)):
inner_list = [0]*topics
#print i
for v in doc_topic_dist[i]:
inner_list[v[0]] =v[1]
#print inner_list
outer_matrix[i] = inner_list
# print outer_matrix[i]
#print outer_matrix
topic_info = pd.DataFrame(outer_matrix, columns=[i for i in range(1,topics+1)])
topic_distr_df_metadata = metadata.join(topic_info, how='outer')
outputfilename = 'M1_topic_distr_df_'+date+'.csv'
topic_distr_df_metadata.to_csv(outputfilename, sep=',', encoding='utf-8', index = False)
print topic_distr_df_metadata | gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/io/formats/test_printing.py | 8 | 7359 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import compat
import pandas.io.formats.printing as printing
import pandas.io.formats.format as fmt
import pandas.core.config as cf
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
assert res == repr(b)
res = printing.pprint_thing(b, quote_strings=False)
assert res == b
class TestFormattBase(object):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 13
assert adj.len(cols[1]) == 13
assert adj.len(cols[2]) == 16
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 23
assert adj.len(cols[1]) == 23
assert adj.len(cols[2]) == 26
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
assert just('abc', 5, mode='left') == 'abc '
assert just('abc', 5, mode='center') == ' abc '
assert just('abc', 5, mode='right') == ' abc'
assert just(u'abc', 5, mode='left') == 'abc '
assert just(u'abc', 5, mode='center') == ' abc '
assert just(u'abc', 5, mode='right') == ' abc'
assert just(u'パンダ', 5, mode='left') == u'パンダ'
assert just(u'パンダ', 5, mode='center') == u'パンダ'
assert just(u'パンダ', 5, mode='right') == u'パンダ'
assert just(u'パンダ', 10, mode='left') == u'パンダ '
assert just(u'パンダ', 10, mode='center') == u' パンダ '
assert just(u'パンダ', 10, mode='right') == u' パンダ'
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len('abc') == 3
assert adj.len(u'abc') == 3
assert adj.len(u'パンダ') == 6
assert adj.len(u'パンダ') == 5
assert adj.len(u'パンダpanda') == 11
assert adj.len(u'パンダpanda') == 10
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 4
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 6
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
class TestTableSchemaRepr(object):
@classmethod
def setup_class(cls):
pytest.importorskip('IPython')
from IPython.core.interactiveshell import InteractiveShell
cls.display_formatter = InteractiveShell.instance().display_formatter
def test_publishes(self):
df = pd.DataFrame({"A": [1, 2]})
objects = [df['A'], df, df] # dataframe / series
expected_keys = [
{'text/plain', 'application/vnd.dataresource+json'},
{'text/plain', 'text/html', 'application/vnd.dataresource+json'},
]
opt = pd.option_context('display.html.table_schema', True)
for obj, expected in zip(objects, expected_keys):
with opt:
formatted = self.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
with_latex = pd.option_context('display.latex.repr', True)
with opt, with_latex:
formatted = self.display_formatter.format(obj)
expected = {'text/plain', 'text/html', 'text/latex',
'application/vnd.dataresource+json'}
assert set(formatted[0].keys()) == expected
def test_publishes_not_implemented(self):
# column MultiIndex
# GH 15996
midx = pd.MultiIndex.from_product([['A', 'B'], ['a', 'b', 'c']])
df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)
opt = pd.option_context('display.html.table_schema', True)
with opt:
formatted = self.display_formatter.format(df)
expected = {'text/plain', 'text/html'}
assert set(formatted[0].keys()) == expected
def test_config_on(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", True):
result = df._repr_data_resource_()
assert result is not None
def test_config_default_off(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", False):
result = df._repr_data_resource_()
assert result is None
def test_enable_data_resource_formatter(self):
# GH 10491
formatters = self.display_formatter.formatters
mimetype = 'application/vnd.dataresource+json'
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# still there, just disabled
assert 'application/vnd.dataresource+json' in formatters
assert not formatters[mimetype].enabled
# able to re-set
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# smoke test that it works
self.display_formatter.format(cf)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# pytest.skip
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
| apache-2.0 |
norheim/pextant | pextant/api.py | 2 | 3350 | import csv
import json
import logging
import re
from pextant.solvers.astarMesh import astarSolver
from pextant.analysis.loadWaypoints import JSONloader
import matplotlib.pyplot as plt
logger = logging.getLogger()
class Pathfinder:
"""
This class performs the A* path finding algorithm and contains the Cost Functions. Also includes
capabilities for analysis of a path.
This class still needs performance testing for maps of larger sizes. I don't believe that
we will be doing anything extremely computationally intensive though.
Current cost functions are Time, Distance, and (Metabolic) Energy. It would be useful to be able to
optimize on other resources like battery power or water sublimated, but those are significantly more
    difficult because they depend on shadowing and were not implemented by Aaron.
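
    A minimal usage sketch (the explorer/terrain objects and waypoints are
    placeholders; the __main__ block at the bottom of this file shows a
    concrete setup):

    >>> pathfinder = Pathfinder(explorer_model, environmental_model)
    >>> segments, raw, items = pathfinder.completeSearch('Energy', waypoints)  # doctest: +SKIP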
"""
def __init__(self, explorer_model, environmental_model):
cheating = 1
self.solver = astarSolver(environmental_model, explorer_model,
optimize_on = 'Energy', heuristic_accelerate = cheating)
def aStarCompletePath(self, optimize_on, waypoints, returnType="JSON", dh=None, fileName=None ):
pass
def completeSearch(self, optimize_on, waypoints, filepath=None ):
"""
Returns a tuple representing the path and the total cost of the path.
The path will be a list. All activity points will be duplicated in
the returned path.
waypoints is a list of activityPoint objects, in the correct order. fileName is
used when we would like to write stuff to a file and is currently necessary
for csv return types.
"""
segmentsout, rawpoints, items = self.solver.solvemultipoint(waypoints)
if filepath:
extension = re.search('^(.+\/[^/]+)\.(\w+)$', filepath).group(2)
else:
extension = None
if extension == "json":
json.dump(segmentsout.tojson(), filepath)
elif extension == "csv":
header = [['isStation', 'x', 'y', 'z', 'distanceMeters', 'energyJoules', 'timeSeconds']]
rows = header + segmentsout.tocsv()
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile)
for row in rows:
writer.writerow(row)
return rows
return segmentsout, rawpoints, items
def completeSearchFromJSON(self, optimize_on, jsonInput, filepath=None, algorithm="A*",
numTestPoints=0):
jloader = JSONloader.from_string(jsonInput)
waypoints = jloader.get_waypoints()
#if algorithm == "A*":
segmentsout,_,_ = self.completeSearch(optimize_on, waypoints, filepath)
updatedjson = jloader.add_search_sol(segmentsout.list)
return updatedjson
if __name__ == '__main__':
from pextant.analysis.loadWaypoints import loadPoints
from explorers import Astronaut
from EnvironmentalModel import GDALMesh
hi_low = GDALMesh('maps/HI_lowqual_DEM.tif')
waypoints = loadPoints('waypoints/HI_13Nov16_MD7_A.json')
env_model = hi_low.loadSubSection(waypoints.geoEnvelope())
astronaut = Astronaut(80)
pathfinder = Pathfinder(astronaut, env_model)
out = pathfinder.aStarCompletePath('Energy', waypoints)
print out | mit |
mdhaber/scipy | scipy/signal/wavelets.py | 16 | 14046 | import numpy as np
from scipy.linalg import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p filter coefficients of the Daubechies low-pass filter.
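
    Examples
    --------
    A small illustrative check: ``daub(p)`` returns ``2*p`` coefficients.

    >>> from scipy.signal import daub
    >>> len(daub(2))
    4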
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of the low-pass filter.
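
    Examples
    --------
    A small illustrative check (the reversed, sign-alternated filter follows
    directly from the implementation below):

    >>> from scipy.signal import qmf
    >>> qmf([1, 2, 3, 4])
    array([ 4, -3,  2, -1])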
"""
N = len(hk) - 1
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
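
    Examples
    --------
    Illustrative sketch: evaluate the Daubechies-3 scaling function on a
    dyadic grid; the output length follows directly from the code below.

    >>> from scipy.signal import daub, cascade
    >>> x, phi, psi = cascade(daub(3), J=5)
    >>> len(x) == (len(daub(3)) - 1) * 2**5
    True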
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.empty((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,
where ``A = 2/(sqrt(3*a)*(pi**0.25))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def morlet2(M, s, w=5):
"""
Complex Morlet wavelet, designed to work with `cwt`.
Returns the complete version of morlet wavelet, normalised
according to `s`::
exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)
Parameters
----------
M : int
Length of the wavelet.
s : float
Width parameter of the wavelet.
w : float, optional
Omega0. Default is 5
Returns
-------
morlet : (M,) ndarray
See Also
--------
morlet : Implementation of Morlet wavelet, incompatible with `cwt`
Notes
-----
.. versionadded:: 1.4.0
This function was designed to work with `cwt`. Because `morlet2`
returns an array of complex numbers, the `dtype` argument of `cwt`
should be set to `complex128` for best results.
Note the difference in implementation with `morlet`.
The fundamental frequency of this wavelet in Hz is given by::
f = w*fs / (2*s*np.pi)
where ``fs`` is the sampling rate and `s` is the wavelet width parameter.
Similarly we can get the wavelet width parameter at ``f``::
s = w*fs / (2*f*np.pi)
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> M = 100
>>> s = 4.0
>>> w = 2.0
>>> wavelet = signal.morlet2(M, s, w)
>>> plt.plot(abs(wavelet))
>>> plt.show()
This example shows basic use of `morlet2` with `cwt` in time-frequency
analysis:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t, dt = np.linspace(0, 1, 200, retstep=True)
>>> fs = 1/dt
>>> w = 6.
>>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)
>>> freq = np.linspace(1, fs/2, 100)
>>> widths = w*fs / (2*freq*np.pi)
>>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
>>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis', shading='gouraud')
>>> plt.show()
"""
x = np.arange(0, M) - (M - 1.0) / 2
x = x / s
wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)
output = np.sqrt(1/s) * wavelet
return output
def cwt(data, wavelet, widths, dtype=None, **kwargs):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter. The `wavelet` function
is allowed to be complex.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
dtype : data-type, optional
The desired data type of output. Defaults to ``float64`` if the
output of `wavelet` is real and ``complex128`` if it is complex.
.. versionadded:: 1.4.0
kwargs
Keyword arguments passed to wavelet function.
.. versionadded:: 1.4.0
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
.. versionadded:: 1.4.0
For non-symmetric, complex-valued wavelets, the input signal is convolved
with the time-reversed complex-conjugate of the wavelet data [1].
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],
**kwargs))[::-1], mode='same')
References
----------
.. [1] S. Mallat, "A Wavelet Tour of Signal Processing (3rd Edition)",
Academic Press, 2009.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
if wavelet == ricker:
window_size = kwargs.pop('window_size', None)
# Determine output type
if dtype is None:
if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':
dtype = np.complex128
else:
dtype = np.float64
output = np.empty((len(widths), len(data)), dtype=dtype)
for ind, width in enumerate(widths):
N = np.min([10 * width, len(data)])
# the conditional block below and the window_size
# kwarg pop above may be removed eventually; these
# are shims for 32-bit arch + NumPy <= 1.14.5 to
# address gh-11095
if wavelet == ricker and window_size is None:
ceil = np.ceil(N)
if ceil != N:
N = int(N)
wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])
output[ind] = convolve(data, wavelet_data, mode='same')
return output
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/metrics/regression.py | 31 | 17366 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
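# Illustrative sketch, not part of the original module: root mean squared error is not
# provided as a separate function here, but it can be derived from the value above by
# taking a square root; the helper name below is hypothetical.
def _demo_root_mean_squared_error():
    import numpy as np
    y_true = [3, -0.5, 2, 7]
    y_pred = [2.5, 0.0, 2, 8]
    mse = mean_squared_error(y_true, y_pred)   # 0.375
    rmse = np.sqrt(mse)                        # ~0.612
    return mse, rmse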
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
        A non-negative floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
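# Illustrative sketch, not part of the original module: the hypothetical helper below
# contrasts the median and mean absolute errors on data with one gross outlier, which
# is the main reason to prefer the median variant for robust evaluation.
def _demo_median_vs_mean_absolute_error():
    y_true = [3.0, -0.5, 2.0, 7.0]
    y_pred = [2.5, 0.0, 2.0, 100.0]                 # last prediction is wildly off
    mae = mean_absolute_error(y_true, y_pred)       # dominated by the outlier (23.5)
    medae = median_absolute_error(y_true, y_pred)   # still 0.5
    return mae, medae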
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
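# Illustrative sketch, not part of the original module: explained variance ignores a
# constant bias in the predictions because the mean residual is subtracted out; the
# helper name below is hypothetical.
def _demo_explained_variance_ignores_bias():
    import numpy as np
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    unbiased = explained_variance_score(y_true, y_pred)
    biased = explained_variance_score(y_true, y_pred + 10.0)  # same score
    assert np.isclose(unbiased, biased)
    return unbiased, biased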
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted'; this behaviour is
        deprecated since version 0.17 and will be changed to 'uniform_average'
        starting in version 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores; a constant y_true is
    # not interesting to score in a regression setting anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
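# Illustrative sketch, not part of the original module: unlike explained variance, R^2
# does penalize a constant bias, and the 'multioutput' settings are just different
# aggregations of the per-output scores; the helper name below is hypothetical.
def _demo_r2_multioutput():
    import numpy as np
    y_true = np.array([[0.5, 1.0], [-1.0, 1.0], [7.0, -6.0]])
    y_pred = np.array([[0.0, 2.0], [-1.0, 2.0], [8.0, -5.0]])
    raw = r2_score(y_true, y_pred, multioutput='raw_values')
    uniform = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert np.isclose(uniform, raw.mean())
    # a constant shift lowers R^2 even though explained_variance_score is unchanged
    shifted = r2_score(y_true.ravel(), y_pred.ravel() + 10.0,
                       multioutput='uniform_average')
    return raw, uniform, shifted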
| bsd-3-clause |
keitaroyam/yamtbx | yamtbx/dataproc/myspotfinder/command_line/make_html_report.py | 1 | 20241 | import os
import re
import math
import time
import datetime
import collections
import glob
import pysqlite2.dbapi2 as sqlite3
import cPickle as pickle
import numpy
import matplotlib
matplotlib.use('Agg') # Allow matplotlib to work without an X server
from PIL import Image
import iotbx.phil
from yamtbx.util import rotate_file
from yamtbx.dataproc.myspotfinder import shikalog
from yamtbx.dataproc.myspotfinder.command_line.spot_finder_gui import Stat
from yamtbx.dataproc.dataset import re_pref_num_ext
from yamtbx.dataproc import bl_logfiles
master_params_str = """\
target_dir = None
.type = path
rotate = False
.type = bool
.help = backup (rotate) old files
mode = *normal zoo
.type = choice
plot = *grid circle
.type = choice
"""
def plot_heatmap(subplot, xs, ys, ds, scaninfo):
#import scipy.interpolate
xlim = min(xs), max(xs)
ylim = min(ys), max(ys)
if scaninfo is not None:
vs, hs = scaninfo.vstep*1000., scaninfo.hstep*1000.
if scaninfo.vpoints == 1: vs = 5
if scaninfo.hpoints == 1: hs = 5
else:
vs, hs = 5, 5
zi = numpy.zeros((int((ylim[1]-ylim[0])/vs+1.5),
int((xlim[1]-xlim[0])/hs+1.5)))
for x, y, d in zip(xs, ys, ds):
i,j = int((y-ylim[0])/vs+0.5), int((x-xlim[0])/hs+0.5)
zi[i,j] = d
p1 = subplot.imshow(zi, origin='lower',
extent=[min(xs)-hs/2, max(xs)+hs/2,
min(ys)-vs/2, max(ys)+vs/2],
interpolation='none', cmap="YlOrRd")#PuRd
return p1
# plot_heatmap()
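# Illustrative sketch, not part of the original script: a hypothetical standalone call
# of plot_heatmap() on a fake 3x3 grid with 5 um spacing and no scan info, rendered
# off-screen with the Agg canvas; the output filename is made up.
def _demo_plot_heatmap(pngout="demo_heatmap.png"):
    import matplotlib.figure
    import matplotlib.backends.backend_agg
    xs, ys, ds = [], [], []
    for iy in range(3):
        for ix in range(3):
            xs.append(ix * 5.)   # horizontal position in um
            ys.append(iy * 5.)   # vertical position in um
            ds.append(ix + iy)   # fake spot-count value
    fig = matplotlib.figure.Figure(figsize=(4, 4), dpi=80)
    ax = fig.add_subplot(111)
    plot_heatmap(ax, xs, ys, ds, None)  # scaninfo=None falls back to 5 um steps
    canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
    canvas.print_figure(pngout, dpi=80, format="png")
# _demo_plot_heatmap()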
def plot_circles(subplot, xs, ys, ds, zero_xs, zero_ys):
def normalize(v, m=100., sd=60.):
vm = float(sum(v))/float(len(v))
vsd = math.sqrt(sum(map(lambda x:(x-vm)**2, v))/float(len(v)))
if vsd < 1e-12:
return [m for x in xrange(len(v))]
return map(lambda x:sd*(x-vm)/vsd+m, v)
# normalize()
def normalize_max(v, maximum=400.):
max_v = max(v)
f = maximum / max_v if max_v > 0 else 1.
return map(lambda x:f*x + 1., v) # add 1 to make zero-value pickable
# normalize_max()
p1 = subplot.scatter(xs, ys, s=normalize_max(ds), c=ds, alpha=0.5)
p2 = subplot.scatter(zero_xs, zero_ys, s=50, marker="x", c=[0]*len(zero_xs), alpha=0.5)
return p1, p2
# plot_circles()
def prepare_plot(plot_data, f, kind, wdir, rotate=False, plot_grid=True):
def normalize_max(v, maximum=400.):
max_v = max(v)
f = maximum / max_v if max_v > 0 else 1.
return map(lambda x:f*x + 1., v) # add 1 to make zero-value pickable # XXX when max_v is Inf?
# normalize_max()
scan_prefix = f[:f.index(" ")] if " (phi=" in f else f
pngout = os.path.join(wdir, "plot_%s%s.png" % (scan_prefix, kind))
if rotate:
rotate_file(pngout)
xs, ys, ds, imgfs = [], [], [], []
zero_xs, zero_ys = [], [] # For values of zero
for imgf, stat in plot_data[f]:
gc = stat.grid_coord
if gc is None:
continue
x, y = gc
x *= 1000.
y *= 1000.
d = stat.stats[("n_spots","total_integrated_signal","median_integrated_signal").index(kind)]
xs.append(x)
ys.append(y)
ds.append(d)
imgfs.append(imgf)
if d == 0:
zero_xs.append(x)
zero_ys.append(y)
if len(xs) == 0:
return "", ""
win = (max(xs)-min(xs)+1000)/1000*400/80*1.7 # ad-hoc scale
hin = (max(ys)-min(ys)+1000)/1000*400/80
fig = matplotlib.figure.Figure(figsize=(win,hin), dpi=80) # figsize in inches
ax = fig.add_subplot(111)
#p = ax.scatter(xs, ys, s=normalize_max(ds), c=ds, alpha=0.5) # s in points^2
scaninfo = plot_data[f][0][1].scan_info
if plot_grid:
p = plot_heatmap(ax, xs, ys, ds, scaninfo)
else:
p, _ = plot_circles(ax, xs, ys, ds, zero_xs, zero_ys)
if max(ds) - min(ds) > 1e-5:
fig.colorbar(p)
ax.scatter(zero_xs, zero_ys, s=50, marker="x", c=[0]*len(zero_xs), alpha=0.5)
ax.set_xlabel("horizontal [um]")
ax.set_ylabel("vertical [um]")
if scaninfo is not None:
vp, hp = scaninfo.vpoints, scaninfo.hpoints
vs, hs = scaninfo.vstep*1000., scaninfo.hstep*1000.
if 1 in (vp, hp) or len(plot_data[f]) <= hp:
ax.set_aspect("auto")
else:
ax.set_aspect("equal")
if vp == hp == 1:
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
elif vp == 1:
ax.set_xlim(min(xs) - hs, max(xs) + hs)
ax.set_ylim(-10, 10)
elif hp == 1:
ax.set_xlim(-10, 10)
ax.set_ylim(min(ys) - vs, max(ys) + vs)
else:
ax.set_xlim(min(xs) - hs, max(xs) + hs)
ax.set_ylim(min(ys) - vs, max(ys) + vs)
else:
# Should never reach here.. but should we set limit here?
vs, hs = 5, 5
canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
canvas.print_figure(pngout+".tmp", dpi=80, format="png")
img_width = fig.get_figwidth() * 80
img_height = fig.get_figheight() * 80
map_str = '<map name="%smap">\n' % scan_prefix
for x, y, imgf in zip(xs, ys, imgfs):
if plot_grid:
tx1, ty1 = ax.transData.transform((x-hs/2.,y-vs/2.))
tx2, ty2 = ax.transData.transform((x+hs/2.,y+vs/2.))
map_str += ' <area shape="rect" coords="%.2f,%.2f,%.2f,%.2f" title="%s" onClick=\'plotClick("%s", "%s")\'>\n' % (tx1, img_height-ty1, tx2, img_height-ty2, os.path.basename(imgf), scan_prefix, os.path.basename(imgf))
else:
tx, ty = ax.transData.transform((x,y))
map_str += ' <area shape="circle" coords="%.2f,%.2f,10" title="%s" onClick=\'plotClick("%s", "%s")\'>\n' % (tx, img_height-ty, os.path.basename(imgf), scan_prefix, os.path.basename(imgf))
map_str += "</map>"
return pngout, map_str
# prepare_plot()
def make_html_report(current_stats, wdir, htmlout, zoo_mode, rotate=False, plot_grid=True):
#plot_data = self.plotFrame.data
shikalog.info("Making HTML report for %s"%wdir)
startt = time.time()
plot_data = collections.OrderedDict()
for f, stat in current_stats.items():
if stat is None: continue
fpref = decide_fpref(f, stat.scan_info)
plot_data.setdefault(fpref, []).append((f, stat))
#if gui_params.mode == "zoo": htmlout = os.path.join(wdir, "report_zoo.html")
#else: htmlout = os.path.join(wdir, "report.html")
if rotate: rotate_file(htmlout)
if zoo_mode: assert len(plot_data) <= 1
kinds = ("total_integrated_signal", "median_integrated_signal", "n_spots")
plots=""
pngs = []
for f in plot_data:
scan_prefix = f[:f.index(" ")] if " (phi=" in f else f
info = plot_data[f][0][1].scan_info
if info is None: info = bl_logfiles.ScanInfo() # Empty info
plots += '<table border=0 style="margin-bottom:0px">\n <tr><td>\n'
if zoo_mode:
try:
im = Image.open(os.path.join(wdir, "../../../before.ppm"))
im.save(os.path.join(wdir, "loop_before.jpg"))
except:
import traceback
print "Can't convert loop image"
print traceback.format_exc()
plots += ' Loop image</td><td><img src="loop_before.jpg" /></td></tr>\n'
plots += ' <tr><td>\n'
plots += ' <table class="info"><tr><th>scan</th><td>%s</td></tr>\n' % scan_prefix
plots += ' <tr><th>date</th><td>%s</td></tr>\n' % (info.date.strftime("%Y/%m/%d %H:%M:%S") if info.date!=0 else "??")
if info.is_shutterless():
plots += ' <tr><th>fixed spindle</th><td>%.2f°</td></tr>\n' % info.fixed_spindle
plots += ' <tr><th>frame rate</th><td>%.2f [Hz]</td></tr>\n' % info.frame_rate
else:
plots += ' <tr><th>osc. start</th><td>%.2f°</td></tr>\n' % info.osc_start
plots += ' <tr><th>osc. step</th><td>%.2f°</td></tr>\n' % info.osc_step
plots += ' <tr><th>exp. time</th><td>%.2f [sec]</td></tr>\n' % info.exp_time
plots += ' <tr><th>beam size</th><td>h= %.1f, v= %.1f [μm]</td></tr>\n' % (info.beam_hsize, info.beam_vsize)
plots += ' <tr><th>attenuator</th><td>%s %.1f [μm]</td></tr>\n' % info.attenuator
plots += ' <tr><th>distance</th><td>%.2f [mm]</td></tr>\n' % info.distance
plots += ' <tr><th>wavelength</th><td>%.4f [Å]</td></tr>\n' % info.wavelength
plots += ' <tr><th>scan points</th><td>v=%d, h=%d</td></tr>\n' % (info.vpoints, info.hpoints)
plots += ' <tr><th>scan steps</th><td>v=%.2f, h=%.2f [μm]</td></tr>\n' % (info.vstep*1000., info.hstep*1000.)
plots += ' </table>\n'
for i, kind in enumerate(kinds):
pngout, mapstr = prepare_plot(plot_data, f, kind, wdir, rotate, plot_grid)
pngs.append(pngout) # rename later
adds = ""
if i == 0:
plots += ' <td><img name="%s" src="%s" usemap="#%smap" /><br />\n' % (scan_prefix, os.path.basename(pngout), scan_prefix)
plots += '<form>\n'
adds = ' checked="checked"'
plots += '<input type="radio" name="spot_mode" value="%s" onClick="changeplot(this, \'%s\')"%s />%s<br />\n' % (kind, scan_prefix, adds, kind)
plots += '</form>%s</td></tr></table><br>\n\n' % mapstr # The last mapstr is used. This is dirty way, though.
plots += '<table border=0 style="margin-bottom:20px">\n <tr><td>\n'
plots += '<td style="border:solid 1px #999"><canvas id="%scanvas" width=600 height=600></canvas>\n' % scan_prefix
plots += '<td id="%sinfo" valign="top"></tr></table>\n\n' % scan_prefix
result = current_stats.items()
if len(result) == 0:
shikalog.warning("No results found. Exiting. %s"% wdir)
return
dbfile = os.path.join(wdir, "shika.db")
con = sqlite3.connect(dbfile, timeout=10, isolation_level=None)
con.execute('pragma query_only = ON;')
print "Reading data from DB for making report html."
c = con.execute("select filename,spots from spots")
dbspots = dict(map(lambda x: (str(x[0]), pickle.loads(str(x[1]))), c.fetchall()))
spot_data = "var spot_data = {"
for i, (f, stat) in enumerate(result):
if stat is None: continue
bf = os.path.basename(f)
spots = dbspots[bf]["spots"]
thumb_posmag = dbspots[bf]["thumb_posmag"]
r = re.search("^(.*)_([0-9]+)\.[^0-9]+$", bf)
prefix, num = r.group(1), int(r.group(2))
spot_data += '"%s":[[' % bf
for y,x,snr,d in spots:
#x, y = spot.max_pxl_y(), spot.max_pxl_x()
pos = thumb_posmag[0:2]
mag = thumb_posmag[2]
x, y = (x - pos[0])*mag, (y - pos[1])*mag
spot_data += "[%d,%d]," % (x, y)
spot_data += "], %.1f, %.1f, %d, %d]," % (stat.stats[1], stat.stats[2], stat.stats[0], num)
spot_data += "};"
spot_data = spot_data.replace("inf,", "Infinity,").replace("nan,", "NaN,")
con.close()
# Determine img picture extension
img_ext = ".png" if os.path.exists(os.path.join(wdir, os.path.basename(result[0][0])+".png")) else ".jpg"
jpg_dirs = "var jpg_dirs = {"
flag_tiled_jpg = False
if glob.glob(os.path.join(wdir, "thumb_*")):
for res in result:
r = re.search("^(.*)_([0-9]+)\.[^0-9]+$", os.path.basename(res[0]))
prefix, num = r.group(1), int(r.group(2))
jd = os.path.join("thumb_%s_%.3d" % (prefix, num//1000))
if not os.path.exists(jd): flag_tiled_jpg = True # THIS MAY CAUSE A PROBLEM..
jpg_dirs += '"%s":"%s",' % (os.path.basename(res[0]), jd)
else:
for res in result:
jpg_dirs += '"%s":".",' % os.path.basename(res[0])
jpg_dirs += "};"
ofs = open(htmlout, "w")
ofs.write("""\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<title>SHIKA report</title>
<script type="text/javascript">
<!--
function changeplot(obj, name){
document.images[name].src = "plot_"+name+obj.value+".png";
}
%(spot_data)s
%(jpg_dirs)s
""" % dict(spot_data=spot_data,
jpg_dirs=jpg_dirs if not flag_tiled_jpg else ""))
if flag_tiled_jpg: # FOR TILED JPEG
ofs.write("""\
function plotClick(scanprefix, imgfile) {
var f = imgfile;
var data = spot_data[f];
var img = new Image();
var idx = Math.floor((data[4]-1)/100);
var n1 = idx*100+1;
var n2 = (idx+1)*100;
img.src = "thumb_" + scanprefix.slice(0,-1) + "/" + scanprefix + ("00000"+n1).slice(-6) + "-" + ("00000"+n2).slice(-6) + ".jpg"; // prefix ends with _
var idx2 = (data[4]-1)%%100;
var sx = idx2%%10;
var sy = Math.floor(idx2/10);
img.onload = (function(fn){
return function(){
var td = document.getElementById(scanprefix+"info");
td.innerHTML = "<table border=0><tr><td>File name: <td>" + imgfile + "<tr><td>total signal: <td>" + data[1] + "<tr><td>median signal: <td>" + data[2] + "<tr><td>N_spots: <td>" + data[3] + "</table>";
var t = data[0];
var canvas = document.getElementById(scanprefix+"canvas");
var ctx = canvas.getContext('2d');
ctx.clearRect(0,0,canvas.width,canvas.height);
ctx.drawImage(this, sx*600, sy*600, 600, 600, 0, 0, 600, 600);
""" % dict(img_ext=img_ext))
else: # FOR SINGLE JPEGs
ofs.write("""\
function plotClick(scanprefix, imgfile) {
var f = imgfile;
var data = spot_data[f];
var img = new Image();
img.src = jpg_dirs[f] + "/" + f + "%(img_ext)s";
img.onload = (function(fn){
return function(){
var td = document.getElementById(scanprefix+"info");
td.innerHTML = "<table border=0><tr><td>File name: <td>" + imgfile + "<tr><td>total signal: <td>" + data[1] + "<tr><td>median signal: <td>" + data[2] + "<tr><td>N_spots: <td>" + data[3] + "</table>";
var t = data[0];
var canvas = document.getElementById(scanprefix+"canvas");
var ctx = canvas.getContext('2d');
ctx.clearRect(0,0,canvas.width,canvas.height);
ctx.drawImage(this, 0, 0);
""" % dict(img_ext=img_ext))
# Common parts
ofs.write("""\
for (var i = 0; i < t.length; i++) {
ctx.rect(t[i][0]-6, t[i][1]-6, 12, 12);
}
ctx.strokeStyle = "red";
ctx.lineWidth = 1;
ctx.stroke();
var center = [300,300];
ctx.beginPath();
ctx.strokeStyle = "blue";
ctx.moveTo(center[0]-10, center[1]);
ctx.lineTo(center[0]+10, center[1]);
ctx.moveTo(center[0], center[1]-10);
ctx.lineTo(center[0], center[1]+10);
ctx.stroke();
}
}(f));
}
//-->
</script>
<style type="text/css">
<!--
table.info {
border-collapse: separate;
border-spacing: 7px;
}
table.info th {
text-align: left;
}
table.images {
border-collapse: collapse;
border: solid 1px #999;
}
table.images caption {
margin-top: 1em;
text-align: left;
}
table.images th,
table.images td {
border: solid 1px #999;
}
table.images th {
background: #E6E6E6;
text-align: center;
white-space: nowrap;
}
-->
</style>
</head>
<body>
<h1>SHIKA report</h1>
<div align="right">
Created on %(date)s<br>
Original directory: %(wdir)s
</div>
<hr style="height: 1px;border: none;border-top: 1px #000000 dotted;" />
%(plots)s
</body>
</html>
""" % dict(plots=plots,
date=datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S"),
wdir=wdir,
))
shikalog.debug("Renaming png files in %s" % wdir)
for png in pngs:
os.rename(png+".tmp", png)
delt = time.time() - startt
shikalog.info("HTML making Done (took %f s). Open? firefox %s"% (delt, htmlout))
# make_html_report()
def load_results(target_dir):
current_stats = collections.OrderedDict()
dbfile = os.path.join(target_dir, "_spotfinder", "shika.db")
if not os.path.isfile(dbfile):
shikalog.error("%s not found." % dbfile)
return
scanlog = os.path.join(target_dir, "diffscan.log")
if not os.path.isfile(scanlog):
shikalog.error("diffscan.log not found in %s" % target_dir)
return
slog = bl_logfiles.BssDiffscanLog(scanlog)
slog.remove_overwritten_scans()
shikalog.info("Loading data: %s" % dbfile)
startt = time.time()
result = []
con = sqlite3.connect(dbfile, timeout=10, isolation_level=None)
shikalog.debug("Opening db with query_only = ON")
con.execute('pragma query_only = ON;')
cur = con.cursor()
for itrial in xrange(60):
try:
c = cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='status';")
if c.fetchone() is None:
shikalog.error("No 'status' in %s" % dbfile)
return
break
except sqlite3.DatabaseError:
shikalog.warning("DB failed. retrying (%d)" % itrial)
time.sleep(1)
continue
for itrial in xrange(60):
try:
c = con.execute("select filename,spots from spots")
results = dict(map(lambda x: (str(x[0]), pickle.loads(str(x[1]))), c.fetchall()))
break
except sqlite3.DatabaseError:
shikalog.warning("DB failed. retrying (%d)" % itrial)
time.sleep(1)
continue
print "DEBUG:: scans=", slog.scans
for scan in slog.scans:
for imgf, (gonio, gc) in scan.filename_coords:
#print imgf, (gonio, gc)
stat = Stat()
# extension should be always .img in shika.db if generated from EIGER stream
possible_imgfs = (imgf, os.path.splitext(imgf)[0] + ".img",
re.sub("(.*)_0([0-9]{6})\..*$", r"\1_\2.img", imgf), # too dirty fix!! for new bss which writes 7-digits filename..
)
imgfs_found = filter(lambda x: x in results, possible_imgfs)
if not imgfs_found: continue
imgf = imgfs_found[0]
snrlist = map(lambda x: x[2], results[imgf]["spots"])
stat.stats = (len(snrlist), sum(snrlist), numpy.median(snrlist) if snrlist else 0)
stat.spots = results[imgf]["spots"]
stat.gonio = gonio
stat.grid_coord = gc
stat.scan_info = scan
stat.thumb_posmag = results[imgf]["thumb_posmag"]
stat.params = results[imgf]["params"]
stat.img_file = os.path.join(target_dir, imgf)
result.append((stat.img_file, stat))
delt = time.time() - startt
shikalog.info("Data loaded: %s (took %f sec)" % (dbfile, delt))
for f, stat in result: current_stats[f] = stat
return current_stats
# load_results()
def decide_fpref(f, scaninfo):
fpref = re_pref_num_ext.search(os.path.basename(f)).group(1)
if scaninfo is not None:
if scaninfo.is_shutterless():
fpref += " (phi=%.2f)" % (scaninfo.fixed_spindle)
else:
fpref += " (phi=%.2f)" % (scaninfo.osc_start)
return fpref
# decide_fpref()
def run(params):
wdir = os.path.abspath(params.target_dir)
target_dir = os.path.normpath(os.path.join(wdir, ".."))
current_stats = load_results(target_dir)
zoo_mode = params.mode == "zoo"
htmlout = os.path.join(wdir, "report_zoo.html" if zoo_mode else "report.html")
make_html_report(current_stats, wdir, htmlout, zoo_mode, params.rotate, params.plot=="grid")
# run()
if __name__ == "__main__":
shikalog.config(None)
import sys
cmdline = iotbx.phil.process_command_line(args=sys.argv[1:],
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
if not params.target_dir and len(args) >= 1:
params.target_dir = args[0]
run(params)
| bsd-3-clause |
getnamo/UnrealEnginePython | tutorials/PlottingGraphsWithMatplotlibAndUnrealEnginePython_Assets/graph_texture.py | 3 | 1064 | import unreal_engine as ue
# EPixelFormat defines the various pixel formats for a texture/image, we will use RGBA with 8bit per channel
from unreal_engine.enums import EPixelFormat
import matplotlib
# set the Agg renderer as we do not need any toolkit
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# set texture/plot dimensions and dpi, ensure dpi is a float !
width = 1024
height = 1024
dpi = 72.0
# create a new figure with the specified sizes
fig = plt.figure(1)
fig.set_dpi(dpi)
fig.set_figwidth(width/dpi)
fig.set_figheight(height/dpi)
# plot a simple graph with a label on the y axis
plt.plot([1, 2, 3, 4])
plt.ylabel('some numbers')
# draw the graph (in memory)
fig.canvas.draw()
# create a texture in memory (will be saved later)
texture = ue.create_transient_texture(width, height, EPixelFormat.PF_R8G8B8A8)
# copy pixels from matplotlib canvas to the texture as RGBA
texture.texture_set_data(fig.canvas.buffer_rgba())
# save the texture
texture.save_package('/Game/FirstGraphTexture')
# open its editor
ue.open_editor_for_asset(texture)
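# Illustrative sketch, an assumption rather than part of the original tutorial: the RGBA
# buffer passed to texture_set_data() above can also be inspected with numpy, e.g. to
# check that it holds 4 bytes (RGBA) per canvas pixel before copying it anywhere else.
import numpy as np
canvas_w, canvas_h = fig.canvas.get_width_height()
rgba = np.frombuffer(fig.canvas.buffer_rgba(), dtype=np.uint8)
n_pixels = rgba.size // 4  # expected to equal canvas_w * canvas_h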
| mit |
VoigtLab/dnaplotlib | gallery/repressilator_animate/repressilator_figure.py | 1 | 8758 | #!/usr/bin/env python
"""
Animation of the repressilator gene circuit
"""
import numpy as np
from scipy.integrate import odeint
import dnaplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
__author__ = 'Emerson Glassey <[email protected]>, Voigt Lab, MIT'
__license__ = 'MIT'
__version__ = '1.0'
# Initialize Simulation
# Initial concentration of mRNA and Protein for each repressor
mtet, mlac, mgamma, tet, lac, gamma = initial = [1, 1, 1, 2, 1, 1]
# Non-dimensionalized production rate
alpha = 15
# Degradation Rate
beta = 2000
# Repressor/Promoter Leak
leak = 1
# Hill Coefficient
n = 8
# Initialize Parts
# tetr is orange [1.00, 0.75, 0.17]
# lacI is green [0.38, 0.82, 0.32]
# gamma is blue [0.38, 0.65, 0.87]
plac = {'name':'P_lac', 'start':1, 'end':10, 'type':'Promoter', 'opts': {'color':[0.38, 0.82, 0.32]}}
rbs1 = {'name':'RBS', 'start':11, 'end':20, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
tetr = {'name':'tetR', 'start':21, 'end':40, 'type':'CDS', 'opts':{'label': 'tetR', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -2, 'label_style':'italic', 'color':[1.00, 0.75, 0.17]}}
term1 = {'name':'Term', 'start':41, 'end':55, 'type':'Terminator'}
pgamma = {'name':'P_gamma', 'start':56, 'end':65, 'type':'Promoter', 'opts': {'color':[0.38, 0.65, 0.87]}}
rbs2 = {'name':'RBS', 'start':66, 'end':75, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
laci = {'name':'lacI', 'start':76, 'end':95, 'type':'CDS', 'opts':{'label': 'lacI', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -2, 'label_style':'italic', 'color':[0.38, 0.82, 0.32]}}
term2 = {'name':'Term', 'start':96, 'end':110, 'type':'Terminator'}
ptet = {'name':'P_tet', 'start':111, 'end':120, 'type':'Promoter', 'opts': {'color':[1.00, 0.75, 0.17]}}
rbs3 = {'name':'RBS', 'start':121, 'end':130, 'type':'RBS', 'opts':{'linewidth': 0, 'color':[0.0, 0.0, 0.0]}}
# Note: this part dictionary reuses the name 'gamma' and shadows the initial protein
# level unpacked above; the simulation only reads the 'initial' list, so this is harmless.
gamma = {'name':'gamma', 'start':131, 'end':150, 'type':'CDS', 'opts':{'label': 'gamma', 'fontsize': 8, 'label_y_offset': 0, 'label_x_offset': -1, 'label_style':'italic', 'color':[0.38, 0.65, 0.87]}}
term3 = {'name':'Term', 'start':151, 'end':165, 'type':'Terminator'}
lac_repress = {'from_part':laci, 'to_part':plac, 'type':'Repression', 'opts':{'linewidth':1, 'color':[0.38, 0.82, 0.32]}}
gamma_repress = {'from_part':gamma, 'to_part':pgamma, 'type':'Repression', 'opts':{'linewidth':1, 'color':[0.38, 0.65, 0.87]}}
tet_repress = {'from_part':tetr, 'to_part':ptet, 'type':'Repression', 'opts':{'linewidth':1, 'color':[1.00, 0.75, 0.17]}}
def repressilator(y, t):
mtet, mlac, mgamma, tet, lac, gamma = y
dmtet = -mtet + (alpha / (1 + lac**n)) + leak
dtet = -beta * (tet - mtet)
dmlac = -mlac + (alpha / (1 + gamma**n)) + leak
dlac = -beta * (lac - mlac)
dmgamma = -mgamma + (alpha / (1 + tet**n)) + leak
dgamma = -beta * (gamma - mgamma)
return [dmtet, dmlac, dmgamma, dtet, dlac, dgamma]
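# Illustrative sketch, not part of the original script: a hypothetical short integration
# of the ODE system above, independent of the figure/movie code in main(), showing how
# odeint consumes repressilator() together with the globals defined at the top.
def _demo_short_integration(t_end=5.0, dt=0.1):
    ts = np.arange(0, t_end + dt, dt)
    traj = odeint(repressilator, initial, ts)
    # columns follow the state order: mtet, mlac, mgamma, tet, lac, gamma
    return ts, traj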
def repression(val, Kd, power):
"""Function takes a value and Kd. Function fits the value to a hill function with n=power and Kd
and returns the fraction bound."""
new_val = val**power / (Kd**power + val** power)
return new_val
def expression(val, lims):
"""function takes a value between two limits (as a tuple) and returns the value normalized
by the limits to be between 0 and 1"""
new_val = (val - lims[0]) / (lims[1] - lims[0])
return new_val
def rescale(val, lims):
"""function takes a value between 0 and 1 and normalizes it between the limits in lims"""
new_val = (val*(lims[1]-lims[0])) + lims[0]
return new_val
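# Illustrative sketch, not part of the original script: quick sanity checks for the
# three helpers above, using hypothetical values.
def _demo_helpers():
    assert abs(repression(2.0, 2.0, 8) - 0.5) < 1e-9      # at val == Kd, half bound
    assert abs(expression(2.5, (1.0, 4.0)) - 0.5) < 1e-9  # midpoint maps to 0.5
    assert abs(rescale(0.5, (0.2, 1.0)) - 0.6) < 1e-9     # and back onto new limits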
def plot_construct(ax, t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma):
tind = int(t*10)
exp_lims = (1.0, 4.0)
ax.set_title('t = {}'.format(t), fontsize=8)
# Set color for each of the CDSs
tetr['opts']['color'] = [rescale(1 - expression(ymtet[tind], exp_lims), (1.0, 1.0)),
rescale(1 - expression(ymtet[tind], exp_lims), (0.75, 1.0)),
rescale(1 - expression(ymtet[tind], exp_lims), (0.17, 1.0))]
laci['opts']['color'] = [rescale(1 - expression(ymlac[tind], exp_lims), (0.38, 1.0)),
rescale(1 - expression(ymlac[tind], exp_lims), (0.82, 1.0)),
rescale(1 - expression(ymlac[tind], exp_lims), (0.32, 1.0))]
gamma['opts']['color'] = [rescale(1 - expression(ymgamma[tind], exp_lims), (0.38, 1.0)),
rescale(1 - expression(ymgamma[tind], exp_lims), (0.65, 1.0)),
rescale(1 - expression(ymgamma[tind], exp_lims), (0.87, 1.0))]
# Set transparency for each of the regulatory lines
lac_repress['opts']['color'] = [0.38, 0.82, 0.32,
rescale(repression(ylac[tind], 2.0, 8), (0.2, 1.0))]
gamma_repress['opts']['color'] = [0.38, 0.65, 0.87,
rescale(repression(ygamma[tind], 2.0, 8), (0.2, 1.0))]
tet_repress['opts']['color'] = [1.00, 0.75, 0.17,
rescale(repression(ytet[tind], 2.0, 8), (0.2, 1.0))]
# Set width for each of the regulatory lines
lac_repress['opts']['linewidth'] = rescale(repression(ylac[tind], 2.0, 8), (0.5, 2.0))
gamma_repress['opts']['linewidth'] = rescale(repression(ygamma[tind], 2.0, 8), (0.5, 2.0))
tet_repress['opts']['linewidth'] = rescale(repression(ytet[tind], 2.0, 8), (0.5, 2.0))
dnaplotlib.plot_sbol_designs([ax], [[plac, rbs1, tetr, term1, pgamma, rbs2, laci, term2, ptet, rbs3, gamma, term3]],
[[lac_repress, gamma_repress, tet_repress]])
ax.set_ylim([-10, 31])
def movie(ts, ymtet, ymlac, ymgamma, ytet, ylac, ygamma):
for t in ts:
plt.close()
plt.figure(figsize=(4, 3.5))
gs = gridspec.GridSpec(3, 1, height_ratios=[2, 0.5, 1])
ax = plt.subplot(gs[0])
plt.plot(ts[:int(t*10)+1], ytet[:int(t*10)+1], color=[1.00, 0.75, 0.17])
plt.plot(ts[:int(t*10)+1], ylac[:int(t*10)+1], color=[0.38, 0.82, 0.32])
plt.plot(ts[:int(t*10)+1], ygamma[:int(t*10)+1], color=[0.38, 0.65, 0.87])
plt.xlim([0, 30])
plt.ylim([1,4])
ax.tick_params(axis='both', labelsize=8, width=0.8, length=3)
ax.yaxis.tick_left()
ax.xaxis.tick_bottom()
ax.set_xlabel('Time', fontsize=8, labelpad=3)
ax.set_ylabel('Protein Concentration', fontsize=8, labelpad=4)
plt.legend(['tetR', 'lacI', 'gamma'], frameon=False, fontsize=8, labelspacing=0.15, loc=(0.03,0.65))
plt.plot(ts[int(t*10)], ytet[int(t*10)], '.', color=[1.00, 0.75, 0.17], markersize=6.0)
plt.plot(ts[int(t*10)], ylac[int(t*10)], '.', color=[0.38, 0.82, 0.32], markersize=6.0)
plt.plot(ts[int(t*10)], ygamma[int(t*10)], '.', color=[0.38, 0.65, 0.87], markersize=6.0)
ax = plt.subplot(gs[2])
plot_construct(ax, t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
plt.savefig("movie/repressilator_t{}.jpg".format(t), dpi=300)
def main():
t = np.arange(0, 30.1, 0.1)
ymtet, ymlac, ymgamma, ytet, ylac, ygamma = list(zip(*odeint(repressilator, initial, t)))
plt.close()
plt.figure(figsize=(3.5, 6.5))
gs = gridspec.GridSpec(8, 1, height_ratios=[1, 2.5, 0.1, 1, 1, 1, 1, 1])
# Plot of repressilator circuit
ax = plt.subplot(gs[0])
dnaplotlib.plot_sbol_designs([ax], [[plac, rbs1, tetr, term1, pgamma, rbs2, laci, term2, ptet, rbs3, gamma, term3]],
[[lac_repress, gamma_repress, tet_repress]])
ax.set_ylim([-10, 31])
# Plot of repressilator dynamics
ax = plt.subplot(gs[1])
plt.plot(t, ytet, color=[1.00, 0.75, 0.17])
plt.plot(t, ylac, color=[0.38, 0.82, 0.32])
plt.plot(t, ygamma, color=[0.38, 0.65, 0.87])
plt.axvline(x=1, color='k', linewidth=0.7)
plt.axvline(x=12, color='k', linewidth=0.7)
plt.axvline(x=25.3, color='k', linewidth=0.7)
plt.axvline(x=27.3, color='k', linewidth=0.7)
plt.axvline(x=29.4, color='k', linewidth=0.7)
plt.ylim([1,4])
ax.tick_params(axis='both', labelsize=8, width=0.8, length=3)
ax.yaxis.tick_left()
ax.xaxis.tick_bottom()
ax.set_xlabel('Time', fontsize=8, labelpad=1)
ax.set_ylabel('Protein Concentration', fontsize=8, labelpad=2)
plt.legend(['tetR', 'lacI', 'gamma'], frameon=False, fontsize=8, labelspacing=0.15, loc=(0.06,0.65))
# Plot of each timepoint
ax = plt.subplot(gs[3])
plot_construct(ax, 1, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[4])
plot_construct(ax, 12, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[5])
plot_construct(ax, 25.3, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[6])
plot_construct(ax, 27.3, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
ax = plt.subplot(gs[7])
plot_construct(ax, 29.4, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
# Update subplot spacing
plt.subplots_adjust(hspace=0.4, left=0.12, right=0.95, top=0.99, bottom=0.01)
# Save the figure
plt.savefig('repressilator_animate.pdf', transparent=True)
plt.savefig('repressilator_animate.png', dpi=300)
# Generate the movie frames
movie(t, ymtet, ymlac, ymgamma, ytet, ylac, ygamma)
if __name__ == '__main__':
main()
| mit |
mehdidc/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the last 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
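# Illustrative sketch, not part of the original example: since the example above reuses
# the name 'clf', the hypothetical helper below refits both models and compares their
# accuracy on the ten heavily weighted samples, whose weights are ten times larger on
# average; the weighted fit is expected to classify more of them correctly.
def _demo_compare_weighted_fit():
    unweighted = linear_model.SGDClassifier(alpha=0.01, n_iter=100).fit(X, y)
    weighted = linear_model.SGDClassifier(alpha=0.01, n_iter=100).fit(
        X, y, sample_weight=sample_weight)
    return (unweighted.score(X[:10], y[:10]),
            weighted.score(X[:10], y[:10]))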
| bsd-3-clause |