repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values)
---|---|---|---|---|---
xuewei4d/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 4 | 37254 | import sys
from io import StringIO
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sp
import pytest
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
from sklearn.exceptions import EfficiencyWarning
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils import check_random_state
from sklearn.manifold._t_sne import _joint_probabilities
from sklearn.manifold._t_sne import _joint_probabilities_nn
from sklearn.manifold._t_sne import _kl_divergence
from sklearn.manifold._t_sne import _kl_divergence_bh
from sklearn.manifold._t_sne import _gradient_descent
from sklearn.manifold._t_sne import trustworthiness
from sklearn.manifold import TSNE
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import _barnes_hut_tsne # type: ignore
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import cosine_distances
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack([
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
])
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
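# ObjectiveSmallGradient: the error shrinks by 0.1 per call while the gradient
# norm stays at 1e-5, exercising the min_grad_norm and n_iter stopping
# criteria below.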
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _, compute_error=True):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
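# flat_function: the error never improves, exercising the
# n_iter_without_progress stopping criterion below.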
def flat_function(_, compute_error=True):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 1.0
assert it == 0
assert("gradient norm" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 11
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert error == 0.0
assert it == 10
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
data = random_state.randn(50, 5)
distances = pairwise_distances(data).astype(np.float32)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
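# The perplexity of each conditional distribution is exp(H(P_i)), with H the
# Shannon entropy in nats; its mean should match the requested perplexity.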
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 200
desired_perplexity = 25.0
random_state = check_random_state(0)
data = random_state.randn(n_samples, 2).astype(np.float32, copy=False)
distances = pairwise_distances(data)
P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
# Test that when we use all the neighbors the results are identical
n_neighbors = n_samples - 1
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
mode='distance')
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, n_neighbors)
P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
indptr = distance_graph.indptr
P1_nn = np.array([P1[k, distance_graph.indices[indptr[k]:indptr[k + 1]]]
for k in range(n_samples)])
assert_array_almost_equal(P1_nn, P2, decimal=4)
# Test that the highest P_ij are the same when fewer neighbors are used
for k in np.linspace(150, n_samples - 1, 5):
k = int(k)
topn = k * 10 # check the top 10 * k entries out of k * k entries
distance_graph = nn.kneighbors_graph(n_neighbors=k, mode='distance')
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, k)
P2k = _binary_search_perplexity(distances_nn, desired_perplexity,
verbose=0)
assert_array_almost_equal(P1_nn, P2, decimal=2)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
idx = np.argsort(P2k.ravel())[::-1]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
n_neighbors = 10
n_samples = 100
random_state = check_random_state(0)
data = random_state.randn(n_samples, 5)
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors,
mode='distance')
distances = distance_graph.data.astype(np.float32, copy=False)
distances = distances.reshape(n_samples, n_neighbors)
last_P = None
desired_perplexity = 3
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), desired_perplexity,
verbose=0)
P1 = _joint_probabilities_nn(distance_graph, desired_perplexity,
verbose=0)
# Convert the sparse matrix to a dense one for testing
P1 = P1.toarray()
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
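# check_grad returns the 2-norm of the difference between the analytic
# gradient and a finite-difference approximation; it should be close to zero.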
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
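# Trustworthiness is 1.0 when every point's nearest neighbors in the
# embedding are also near neighbors in the original space; "intruder"
# neighbors lower the score.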
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert trustworthiness(X, 5.0 + X / 10.0) == 1.0
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert trustworthiness(X, X_embedded) < 0.6
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
@pytest.mark.parametrize("method", ['exact', 'barnes_hut'])
@pytest.mark.parametrize("init", ('random', 'pca'))
def test_preserve_trustworthiness_approximately(method, init):
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
n_components = 2
X = random_state.randn(50, n_components).astype(np.float32)
tsne = TSNE(n_components=n_components, init=init, random_state=0,
method=method, n_iter=700)
X_embedded = tsne.fit_transform(X)
t = trustworthiness(X, X_embedded, n_neighbors=1)
assert t > 0.85
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [250, 300, 350]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert kl_divergences[1] <= kl_divergences[0]
assert kl_divergences[2] <= kl_divergences[1]
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_fit_csr_matrix(method):
# X can be a sparse matrix.
rng = check_random_state(0)
X = rng.randn(50, 2)
X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method=method, n_iter=750)
X_embedded = tsne.fit_transform(X_csr)
assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1),
1.0, rtol=1.1e-1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
for i in range(3):
X = random_state.randn(80, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
early_exaggeration=2.0, metric="precomputed",
random_state=i, verbose=0, n_iter=500,
square_distances=True)
X_embedded = tsne.fit_transform(D)
t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed")
assert t > .95
def test_trustworthiness_not_euclidean_metric():
# Test trustworthiness with a metric different from 'euclidean' and
# 'precomputed'
random_state = check_random_state(0)
X = random_state.randn(100, 2)
assert (trustworthiness(X, X, metric='cosine') ==
trustworthiness(pairwise_distances(X, metric='cosine'), X,
metric='precomputed'))
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
with pytest.raises(ValueError, match="early_exaggeration .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
with pytest.raises(ValueError, match="n_iter .*"):
tsne.fit_transform(np.array([[0.0], [0.0]]))
@pytest.mark.parametrize('method, retype', [
('exact', np.asarray),
('barnes_hut', np.asarray),
('barnes_hut', sp.csr_matrix),
])
@pytest.mark.parametrize('D, message_regex', [
([[0.0], [1.0]], ".* square distance matrix"),
([[0., -1.], [1., 0.]], ".* positive.*"),
])
def test_bad_precomputed_distances(method, D, retype, message_regex):
tsne = TSNE(metric="precomputed", method=method,
square_distances=True)
with pytest.raises(ValueError, match=message_regex):
tsne.fit_transform(retype(D))
def test_exact_no_precomputed_sparse():
tsne = TSNE(metric='precomputed', method='exact', square_distances=True)
with pytest.raises(TypeError, match='sparse'):
tsne.fit_transform(sp.csr_matrix([[0, 5], [5, 0]]))
def test_high_perplexity_precomputed_sparse_distances():
# Perplexity should be less than 50
dist = np.array([[1., 0., 0.], [0., 1., 0.], [1., 0., 0.]])
bad_dist = sp.csr_matrix(dist)
tsne = TSNE(metric="precomputed", square_distances=True)
msg = "3 neighbors per samples are required, but some samples have only 1"
with pytest.raises(ValueError, match=msg):
tsne.fit_transform(bad_dist)
@ignore_warnings(category=EfficiencyWarning)
def test_sparse_precomputed_distance():
"""Make sure that TSNE works identically for sparse and dense matrix"""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D_sparse = kneighbors_graph(X, n_neighbors=100, mode='distance',
include_self=True)
D = pairwise_distances(X)
assert sp.issparse(D_sparse)
assert_almost_equal(D_sparse.A, D)
tsne = TSNE(metric="precomputed", random_state=0, square_distances=True)
Xt_dense = tsne.fit_transform(D)
for fmt in ['csr', 'lil']:
Xt_sparse = tsne.fit_transform(D_sparse.asformat(fmt))
assert_almost_equal(Xt_dense, Xt_sparse)
def test_non_positive_computed_distances():
# Computed distance matrices must be positive.
def metric(x, y):
return -1
# Negative computed distances should be caught even if result is squared
tsne = TSNE(metric=metric, method='exact', square_distances=True)
X = np.array([[0.0, 0.0], [1.0, 1.0]])
with pytest.raises(ValueError, match="All distances .*metric given.*"):
tsne.fit_transform(X)
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
tsne = TSNE(init="not available")
m = "'init' must be 'pca', 'random', or a numpy array"
with pytest.raises(ValueError, match=m):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed",
square_distances=True)
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available", method='exact', square_distances=True)
with pytest.raises(ValueError, match="Unknown metric not available.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
tsne = TSNE(metric="not available", method='barnes_hut',
square_distances=True)
with pytest.raises(ValueError, match="Metric 'not available' not valid.*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_method_not_available():
# 'method' must be 'barnes_hut' or 'exact'
tsne = TSNE(method='not available')
with pytest.raises(ValueError, match="'method' must be 'barnes_hut' or "):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_square_distances_not_available():
# square_distances must be True or 'legacy'.
tsne = TSNE(square_distances="not_available")
with pytest.raises(ValueError, match="'square_distances' must be True or"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_angle_out_of_range_checks():
# check the angle parameter range
for angle in [-1, -1e-6, 1 + 1e-6, 2]:
tsne = TSNE(angle=angle)
with pytest.raises(ValueError, match="'angle' must be between "
"0.0 - 1.0"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# PCA initialization cannot be used with a precomputed distance matrix.
tsne = TSNE(metric="precomputed", init="pca", square_distances=True)
with pytest.raises(ValueError, match="The parameter init=\"pca\" cannot"
" be used with"
" metric=\"precomputed\"."):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_n_components_range():
# barnes_hut method should only be used with n_components <= 3
tsne = TSNE(n_components=4, method="barnes_hut")
with pytest.raises(ValueError, match="'n_components' should be .*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
# check that the ``early_exaggeration`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=1.0, n_iter=250)
X_embedded1 = tsne.fit_transform(X)
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=100.0, init="pca", random_state=0,
method=method, early_exaggeration=10.0, n_iter=250)
X_embedded2 = tsne.fit_transform(X)
assert not np.allclose(X_embedded1, X_embedded2)
def test_n_iter_used():
# check that the ``n_iter`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
for n_iter in [251, 500]:
tsne = TSNE(n_components=n_components, perplexity=1,
learning_rate=0.5, init="random", random_state=0,
method=method, early_exaggeration=1.0, n_iter=n_iter)
tsne.fit_transform(X)
assert tsne.n_iter_ == n_iter - 1
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
# Skip num points should make it such that the Barnes_hut gradient
# is not calculated for indices below skip_num_point.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64, copy=False)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
grad_bh, 0.5, 2, 1, skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("nearest neighbors..." in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("early exaggeration" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev", square_distances=True)
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
@pytest.mark.parametrize('dt', [np.float32, np.float64])
def test_64bit(method, dt):
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
X = random_state.randn(10, 2).astype(dt, copy=False)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method, verbose=0,
n_iter=300)
X_embedded = tsne.fit_transform(X)
effective_type = X_embedded.dtype
# tsne cython code is only single precision, so the output will
# always be single precision, irrespectively of the input dtype
assert effective_type == np.float32
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_kl_divergence_not_nan(method):
# Ensure kl_divergence_ is computed at last iteration
# even though n_iter % n_iter_check != 0, i.e. 503 % 50 != 0
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method, verbose=0, n_iter=503)
tsne.fit_transform(X)
assert not np.isnan(tsne.kl_divergence_)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
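# With angle=0 no cell of the space-partitioning tree is ever summarized, so
# the Barnes-Hut gradient reduces to the exact O(N^2) computation.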
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features)
distances = pairwise_distances(data)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, verbose=0)
kl_exact, grad_exact = _kl_divergence(params, P, degrees_of_freedom,
n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
n_neighbors=n_neighbors, mode='distance')
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_bh, grad_bh = _kl_divergence_bh(params, P_bh, degrees_of_freedom,
n_samples, n_components,
angle=angle, skip_num_points=0,
verbose=0)
P = squareform(P)
P_bh = P_bh.toarray()
assert_array_almost_equal(P_bh, P, decimal=5)
assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress():
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 10)
for method in ["barnes_hut", "exact"]:
tsne = TSNE(n_iter_without_progress=-1, verbose=2, learning_rate=1e8,
random_state=0, method=method, n_iter=351, init="random")
tsne._N_ITER_CHECK = 1
tsne._EXPLORATION_N_ITER = 0
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the value of n_iter_without_progress
assert ("did not make any progress during the "
"last -1 episodes. Finished." in out)
def test_min_grad_norm():
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2,
random_state=0, method='exact')
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
lines_out = out.split('\n')
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
# When the computation is Finished just an old gradient norm value
# is repeated that we do not need to store
if 'Finished' in line:
break
start_grad_norm = line.find('gradient norm')
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace('gradient norm = ', '').split(' ')[0]
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = \
len(gradient_norm_values[gradient_norm_values <= min_grad_norm])
# The gradient norm can be smaller than min_grad_norm at most once,
# because in the moment it becomes smaller the optimization stops
assert n_smaller_gradient_norms <= 1
def test_accessible_kl_divergence():
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(n_iter_without_progress=2, verbose=2,
random_state=0, method='exact',
n_iter=500)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in out.split('\n')[::-1]:
if 'Iteration' in line:
_, _, error = line.partition('error = ')
if error:
error, _, _ = error.partition(',')
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
@pytest.mark.parametrize('method', ['barnes_hut', 'exact'])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
Due to ties in distances between point in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because bad
initialization can lead to convergence to bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
n_iter = 500
for seed in seeds:
tsne = TSNE(n_components=2, init='random', random_state=seed,
perplexity=50, n_iter=n_iter, method=method)
Y = tsne.fit_transform(X_2d_grid)
try_name = "{}_{}".format(method, seed)
try:
assert_uniform_grid(Y, try_name)
except AssertionError:
# If the test fails a first time, re-run with init=Y to see if
# this was caused by a bad initialization. Note that this will
# also run an early_exaggeration step.
try_name += ":rerun"
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
# Ensure that the resulting embedding leads to approximately
# uniformly spaced points: the distance to the closest neighbors
# should be non-zero and approximately constant.
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > .5, try_name
assert largest_to_mean < 2, try_name
def test_bh_match_exact():
# check that the ``barnes_hut`` method matches the exact one when
# ``angle = 0`` and ``perplexity > n_samples / 3``
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features).astype(np.float32)
X_embeddeds = {}
n_iter = {}
for method in ['exact', 'barnes_hut']:
tsne = TSNE(n_components=2, method=method, learning_rate=1.0,
init="random", random_state=0, n_iter=251,
perplexity=30.0, angle=0)
# Kill the early_exaggeration
tsne._EXPLORATION_N_ITER = 0
X_embeddeds[method] = tsne.fit_transform(X)
n_iter[method] = tsne.n_iter_
assert n_iter['exact'] == n_iter['barnes_hut']
assert_allclose(X_embeddeds['exact'], X_embeddeds['barnes_hut'], rtol=1e-4)
def test_gradient_bh_multithread_match_sequential():
# check that the bh gradient with different num_threads gives the same
# results
n_features = 10
n_samples = 30
n_components = 2
degrees_of_freedom = 1
angle = 3
perplexity = 5
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features).astype(np.float32)
params = random_state.randn(n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = NearestNeighbors().fit(data).kneighbors_graph(
n_neighbors=n_neighbors, mode='distance')
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_sequential, grad_sequential = _kl_divergence_bh(
params, P_bh, degrees_of_freedom, n_samples, n_components,
angle=angle, skip_num_points=0, verbose=0, num_threads=1)
for num_threads in [2, 4]:
kl_multithread, grad_multithread = _kl_divergence_bh(
params, P_bh, degrees_of_freedom, n_samples, n_components,
angle=angle, skip_num_points=0, verbose=0, num_threads=num_threads)
assert_allclose(kl_multithread, kl_sequential, rtol=1e-6)
assert_allclose(grad_multithread, grad_sequential)
def test_tsne_with_different_distance_metrics():
"""Make sure that TSNE works for different distance metrics"""
random_state = check_random_state(0)
n_components_original = 3
n_components_embedding = 2
X = random_state.randn(50, n_components_original).astype(np.float32)
metrics = ['manhattan', 'cosine']
dist_funcs = [manhattan_distances, cosine_distances]
for metric, dist_func in zip(metrics, dist_funcs):
X_transformed_tsne = TSNE(
metric=metric, n_components=n_components_embedding,
random_state=0, n_iter=300, square_distances=True).fit_transform(X)
X_transformed_tsne_precomputed = TSNE(
metric='precomputed', n_components=n_components_embedding,
random_state=0, n_iter=300,
square_distances=True).fit_transform(dist_func(X))
assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed)
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
@pytest.mark.parametrize('metric', ['euclidean', 'manhattan'])
@pytest.mark.parametrize('square_distances', [True, 'legacy'])
@ignore_warnings(category=FutureWarning)
def test_tsne_different_square_distances(method, metric, square_distances):
# Make sure that TSNE works for different square_distances settings
# FIXME remove test when square_distances=True becomes the default in 1.1
random_state = check_random_state(0)
n_components_original = 3
n_components_embedding = 2
# Used to create data with structure; this avoids unstable behavior in TSNE
X, _ = make_blobs(n_features=n_components_original,
random_state=random_state)
X_precomputed = pairwise_distances(X, metric=metric)
if metric == 'euclidean' and square_distances == 'legacy':
X_precomputed **= 2
X_transformed_tsne = TSNE(
metric=metric, n_components=n_components_embedding,
square_distances=square_distances, method=method,
random_state=0).fit_transform(X)
X_transformed_tsne_precomputed = TSNE(
metric='precomputed', n_components=n_components_embedding,
square_distances=square_distances, method=method,
random_state=0).fit_transform(X_precomputed)
assert_allclose(X_transformed_tsne, X_transformed_tsne_precomputed)
@pytest.mark.parametrize('metric', ['euclidean', 'manhattan'])
@pytest.mark.parametrize('square_distances', [True, 'legacy'])
def test_tsne_square_distances_futurewarning(metric, square_distances):
# Make sure that a FutureWarning is only raised when a non-Euclidean
# metric is specified and square_distances is not set to True.
random_state = check_random_state(0)
X = random_state.randn(5, 2)
tsne = TSNE(metric=metric, square_distances=square_distances)
if metric != 'euclidean' and square_distances is not True:
with pytest.warns(FutureWarning, match="'square_distances'.*"):
tsne.fit_transform(X)
else:
with pytest.warns(None) as record:
tsne.fit_transform(X)
assert not record
@pytest.mark.parametrize('method', ['exact', 'barnes_hut'])
def test_tsne_n_jobs(method):
"""Make sure that the n_jobs parameter doesn't impact the output"""
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features)
X_tr_ref = TSNE(n_components=2, method=method, perplexity=30.0,
angle=0, n_jobs=1, random_state=0).fit_transform(X)
X_tr = TSNE(n_components=2, method=method, perplexity=30.0,
angle=0, n_jobs=2, random_state=0).fit_transform(X)
assert_allclose(X_tr_ref, X_tr)
| bsd-3-clause |
dwiajik/twit-macet-mining-v3 | svm.py | 1 | 3582 | import csv
from os.path import dirname, join, exists
from random import shuffle
import resource
import time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import precision_recall_fscore_support
from sklearn.svm import LinearSVC
from modules.cleaner import clean
## Initialize categories and count vectorizer
categories = ['traffic', 'non_traffic']
count_vect = CountVectorizer(preprocessor=clean)
calculations = [
'cosine',
'dice',
'jaccard',
'overlap',
'lcs',
]
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
results = []
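# Each (similarity calculation, threshold) pair maps to a pre-generated
# training set under result/generated_datasets/<calculation>/<threshold>/;
# pairs without data on disk are skipped.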
for calculation in calculations:
for threshold in thresholds:
print('{} - {}'.format(calculation, threshold))
if (exists('result/generated_datasets/{}/{}/traffic.csv'.format(calculation, threshold))):
## Open training set
with open(join(dirname(__file__), 'result/generated_datasets/{}/{}/traffic.csv'.format(calculation, threshold)), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
traffic_tweets = [line[0] for line in dataset]
with open(join(dirname(__file__), 'result/generated_datasets/{}/{}/non_traffic.csv'.format(calculation, threshold)), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
non_traffic_tweets = [line[0] for line in dataset]
tweets = {
'data': traffic_tweets + non_traffic_tweets,
'target': [True] * len(traffic_tweets) + [False] * len(non_traffic_tweets),
}
training_vectors = count_vect.fit_transform(tweets['data'])
# print(training_vectors.shape)
# print(len(tweets['target']))
# start_time = time.clock()
clf = LinearSVC(max_iter=10000).fit(training_vectors, tweets['target'])
# training_time = round(time.clock() - start_time, 2) / 100
# print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
## Open Test set
with open(join(dirname(__file__), 'tweets_corpus/test_set_10000.csv'), newline='\n') as csv_input:
dataset = csv.reader(csv_input, delimiter=',', quotechar='"')
dataset = [(line[0], line[1]) for line in dataset]
shuffle(dataset)
test = {
'data': [line[0] for line in dataset],
'target': [line[1] == 'traffic' for line in dataset],
}
test_vectors = count_vect.transform(test['data'])
predicted = clf.predict(test_vectors)
accuracy = np.mean(predicted == test['target'])
prfs = precision_recall_fscore_support(test['target'], predicted)
# print('Training time: {}'.format(training_time))
print('Accuracy: {}'.format(accuracy))
print('Precision: {}'.format(prfs[0][0]))
print('Recall: {}'.format(prfs[1][0]))
print('F-score: {}'.format(prfs[2][0]))
results.append((calculation, threshold, accuracy, prfs[0][0], prfs[1][0], prfs[2][0]))
# for doc, category in zip(test_doc, predicted):
# print('%r => %s' % (doc, categories[category]))
with open(join(dirname(__file__), 'svm_test.csv'), 'a', newline='\n') as csv_output:
csv_writer = csv.writer(csv_output, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
for result in results:
csv_writer.writerow(result) | mit |
zihua/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 110 | 3768 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
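# DictVectorizer applies one-of-K (one-hot) coding to string-valued features,
# producing "name=value" columns, while numeric and boolean values are kept
# as-is.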
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
anurag313/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
tjrivera/PyCap | test/test_project.py | 1 | 12505 | #! /usr/bin/env python
import unittest
from redcap import Project, RedcapError
import semantic_version
skip_pd = False
try:
import pandas as pd
except ImportError:
skip_pd = True
class ProjectTests(unittest.TestCase):
"""docstring for ProjectTests"""
url = 'https://redcap.vanderbilt.edu/api/'
bad_url = 'https://redcap.vanderbilt.edu/api'
reg_token = '8E66DB6844D58E990075AFB51658A002'
long_proj = Project(url, '1387872621BBF1C17CC47FD8AE25FF54')
reg_proj = Project(url, reg_token)
ssl_proj = Project(url, reg_token, verify_ssl=False)
survey_proj = Project(url, '37CAB1ABC2FEB3BB9D821DF13BA38A7B')
def setUp(self):
pass
def tearDown(self):
pass
def test_good_init(self):
"""Ensure basic instantiation """
self.assertIsInstance(self.long_proj, Project)
self.assertIsInstance(self.reg_proj, Project)
self.assertIsInstance(self.ssl_proj, Project)
def test_normal_attrs(self):
"""Ensure projects are created with all normal attrs"""
for attr in ('metadata', 'field_names', 'field_labels', 'forms',
'events', 'arm_names', 'arm_nums', 'def_field'):
self.assertTrue(hasattr(self.reg_proj, attr))
def test_long_attrs(self):
"proj.events/arm_names/arm_nums should not be empty in long projects"
self.assertIsNotNone(self.long_proj.events)
self.assertIsNotNone(self.long_proj.arm_names)
self.assertIsNotNone(self.long_proj.arm_nums)
def test_is_longitudinal(self):
"Test the is_longitudinal method"
self.assertFalse(self.reg_proj.is_longitudinal())
self.assertTrue(self.long_proj.is_longitudinal())
def test_regular_attrs(self):
"""proj.events/arm_names/arm_nums should be empty tuples"""
for attr in 'events', 'arm_names', 'arm_nums':
attr_obj = getattr(self.reg_proj, attr)
self.assertIsNotNone(attr_obj)
self.assertEqual(len(attr_obj), 0)
def test_json_export(self):
""" Make sure we get a list of dicts"""
data = self.reg_proj.export_records()
self.assertIsInstance(data, list)
for record in data:
self.assertIsInstance(record, dict)
def test_long_export(self):
"""After determining a unique event name, make sure we get a
list of dicts"""
unique_event = self.long_proj.events[0]['unique_event_name']
data = self.long_proj.export_records(events=[unique_event])
self.assertIsInstance(data, list)
for record in data:
self.assertIsInstance(record, dict)
def test_import_records(self):
"Test record import"
data = self.reg_proj.export_records()
response = self.reg_proj.import_records(data)
self.assertIn('count', response)
self.assertNotIn('error', response)
def test_import_exception(self):
"Test record import throws RedcapError for bad import"
data = self.reg_proj.export_records()
data[0]['non_existent_key'] = 'foo'
with self.assertRaises(RedcapError) as cm:
self.reg_proj.import_records(data)
exc = cm.exception
self.assertIn('error', exc.args[0])
def is_good_csv(self, csv_string):
"Helper to test csv strings"
return isinstance(csv_string, basestring)
def test_csv_export(self):
"""Test valid csv export """
csv = self.reg_proj.export_records(format='csv')
self.assertTrue(self.is_good_csv(csv))
def test_metadata_export(self):
"""Test valid metadata csv export"""
csv = self.reg_proj.export_metadata(format='csv')
self.assertTrue(self.is_good_csv(csv))
def test_bad_creds(self):
"Test that exceptions are raised with bad URL or tokens"
with self.assertRaises(RedcapError):
Project(self.bad_url, self.reg_token)
with self.assertRaises(RedcapError):
Project(self.url, '1')
def test_fem_export(self):
""" Test fem export in json format gives list of dicts"""
fem = self.long_proj.export_fem(format='json')
self.assertIsInstance(fem, list)
for arm in fem:
self.assertIsInstance(arm, dict)
def test_file_export(self):
"""Test file export and proper content-type parsing"""
record, field = '1', 'file'
#Upload first to make sure file is there
self.import_file()
# Now export it
content, headers = self.reg_proj.export_file(record, field)
self.assertIsInstance(content, basestring)
# We should at least get the filename in the headers
for key in ['name']:
self.assertIn(key, headers)
# needs to raise ValueError for exporting non-file fields
with self.assertRaises(ValueError):
self.reg_proj.export_file(record=record, field='dob')
# Delete and make sure we get an RedcapError with next export
self.reg_proj.delete_file(record, field)
with self.assertRaises(RedcapError):
self.reg_proj.export_file(record, field)
def import_file(self):
upload_fname = self.upload_fname()
with open(upload_fname, 'r') as fobj:
response = self.reg_proj.import_file('1', 'file', upload_fname, fobj)
return response
def upload_fname(self):
import os
this_dir, this_fname = os.path.split(__file__)
return os.path.join(this_dir, 'data.txt')
def test_file_import(self):
"Test file import"
# Make sure a well-formed request doesn't throw RedcapError
try:
response = self.import_file()
except RedcapError:
self.fail("Shouldn't throw RedcapError for successful imports")
self.assertTrue('error' not in response)
# Test importing a file to a non-file field raises a ValueError
fname = self.upload_fname()
with open(fname, 'r') as fobj:
with self.assertRaises(ValueError):
response = self.reg_proj.import_file('1', 'first_name',
fname, fobj)
def test_file_delete(self):
"Test file deletion"
# upload a file
fname = self.upload_fname()
with open(fname, 'r') as fobj:
self.reg_proj.import_file('1', 'file', fname, fobj)
# make sure deleting doesn't raise
try:
self.reg_proj.delete_file('1', 'file')
except RedcapError:
self.fail("Shouldn't throw RedcapError for successful deletes")
def test_user_export(self):
"Test user export"
users = self.reg_proj.export_users()
# A project must have at least one user
self.assertTrue(len(users) > 0)
req_keys = ['firstname', 'lastname', 'email', 'username',
'expiration', 'data_access_group', 'data_export',
'forms']
for user in users:
for key in req_keys:
self.assertIn(key, user)
def test_verify_ssl(self):
"""Test argument making for SSL verification"""
# Test we won't verify SSL cert for non-verified project
post_kwargs = self.ssl_proj._kwargs()
self.assertIn('verify', post_kwargs)
self.assertFalse(post_kwargs['verify'])
# Test we do verify SSL cert in normal project
post_kwargs = self.reg_proj._kwargs()
self.assertIn('verify', post_kwargs)
self.assertTrue(post_kwargs['verify'])
def test_export_data_access_groups(self):
"""Test we get 'redcap_data_access_group' in exported data"""
records = self.reg_proj.export_records(export_data_access_groups=True)
for record in records:
self.assertIn('redcap_data_access_group', record)
# When not passed, that key shouldn't be there
records = self.reg_proj.export_records()
for record in records:
self.assertNotIn('redcap_data_access_group', record)
def test_export_survey_fields(self):
"""Test that we get the appropriate survey keys in the exported
data.
Note that the 'demographics' form has been setup as the survey
in the `survey_proj` project. The _timestamp field will vary for
users as their survey form will be named differently"""
records = self.survey_proj.export_records(export_survey_fields=True)
for record in records:
self.assertIn('redcap_survey_identifier', record)
self.assertIn('demographics_timestamp', record)
# The regular project doesn't have a survey setup. Users should
# be able this argument as True but it winds up a no-op.
records = self.reg_proj.export_records(export_survey_fields=True)
for record in records:
self.assertNotIn('redcap_survey_identifier', record)
self.assertNotIn('demographics_timestamp', record)
@unittest.skipIf(skip_pd, "Couldn't import pandas")
def test_metadata_to_df(self):
"""Test metadata export --> DataFrame"""
df = self.reg_proj.export_metadata(format='df')
self.assertIsInstance(df, pd.DataFrame)
@unittest.skipIf(skip_pd, "Couldn't import pandas")
def test_export_to_df(self):
"""Test export --> DataFrame"""
df = self.reg_proj.export_records(format='df')
self.assertIsInstance(df, pd.DataFrame)
# Test it's a normal index
self.assertTrue(hasattr(df.index, 'name'))
# Test for a MultiIndex on longitudinal df
long_df = self.long_proj.export_records(format='df', event_name='raw')
self.assertTrue(hasattr(long_df.index, 'names'))
@unittest.skipIf(skip_pd, "Couldn't import pandas")
def test_export_df_kwargs(self):
"""Test passing kwargs to export DataFrame construction"""
df = self.reg_proj.export_records(format='df',
df_kwargs={'index_col': 'first_name'})
self.assertEqual(df.index.name, 'first_name')
self.assertTrue('study_id' in df)
@unittest.skipIf(skip_pd, "Couldn't import pandas")
def test_metadata_df_kwargs(self):
"""Test passing kwargs to metadata DataFrame construction"""
df = self.reg_proj.export_metadata(format='df',
df_kwargs={'index_col': 'field_label'})
self.assertEqual(df.index.name, 'field_label')
self.assertTrue('field_name' in df)
@unittest.skipIf(skip_pd, "Couldn't import pandas")
def test_import_dataframe(self):
"""Test importing a pandas.DataFrame"""
df = self.reg_proj.export_records(format='df')
# grrr coerce implicitly converted floats to str(int())
for col in ['matrix1', 'matrix2', 'matrix3', 'sex']:
df[col] = map(lambda x: str(int(x)) if pd.notnull(x) else '', df[col])
response = self.reg_proj.import_records(df)
self.assertIn('count', response)
self.assertNotIn('error', response)
long_df = self.long_proj.export_records(event_name='raw', format='df')
response = self.long_proj.import_records(long_df)
self.assertIn('count', response)
self.assertNotIn('error', response)
def test_date_formatting(self):
"""Test date_format parameter"""
def import_factory(date_string):
return [{'study_id': '1',
'dob': date_string}]
# Default YMD with dashes
import_ymd = import_factory('2000-01-01')
response = self.reg_proj.import_records(import_ymd)
self.assertEqual(response['count'], 1)
# DMY with /
import_dmy = import_factory('31/01/2000')
response = self.reg_proj.import_records(import_dmy, date_format='DMY')
self.assertEqual(response['count'], 1)
import_mdy = import_factory('12/31/2000')
response = self.reg_proj.import_records(import_mdy, date_format='MDY')
self.assertEqual(response['count'], 1)
def test_get_version(self):
"""Testing retrieval of REDCap version associated with Project"""
self.assertTrue(isinstance(semantic_version.Version('1.0.0'), type(self.long_proj.redcap_version)))
def test_export_checkbox_labels(self):
"""Testing the export of checkbox labels as field values"""
self.assertEqual(
self.reg_proj.export_records(
raw_or_label='label',
export_checkbox_labels=True)[0]['matcheck1___1'],
'Foo'
)
| mit |
kaylanb/SkinApp | machine_learn/HOG/features_ouput_TestData.py | 1 | 5253 | '''outputs features for HOG machine learning'''
from matplotlib import pyplot as plt
from matplotlib import image as mpimg
from scipy import sqrt, pi, arctan2, cos, sin, ndimage, fftpack, stats
from skimage import exposure, measure, feature
import pandas as pd
from PIL import Image
import cStringIO
import urllib2
import numpy as np
from pylab import *
#http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog/
#http://pydoc.net/Python/scikits-image/0.4.2/skimage.feature.hog/
#labels_df = pd.DataFrame(columns=['blocks'])
#get url
file="../training_image_urls/NewTraining_SkinNoFaces_everyones.txt"
urls=np.loadtxt(file,dtype="str")
url_good_list=[]
for url in urls:
try:
read= urllib2.urlopen(url).read()
url_good_list.append(url)
except urllib2.URLError:
continue
nrow = len(url_good_list)
#labels_df = np.zeros((nrow, 8100)) #People_Feature_7.csv Food_Feature_7.csv
#labels_df = np.zeros((nrow, 15390)) #People_Feature_8.csv Food_Feature_8.csv
#labels_df = np.zeros((nrow, 1296)) #People_Feature_9.csv Food_Feature_9.csv
#labels_df = np.zeros((nrow, 29241)) #People_Feature_10.csv Food_Feature_10.csv
#labels_df = np.zeros((nrow, 29241)) #People_All_1.csv Food_All_1.csv
#labels_df = np.zeros((nrow, 46656)) #People_All_2.csv Food_All_2.csv
#labels_df = np.zeros((nrow, 65025)) #People_All_3.csv Food_All_3.csv
#labels_df = np.zeros((nrow, 8100)) #People_All_4.csv Food_All_4.csv
#labels_df = np.zeros((nrow, 11664)) #People_All_5.csv Food_All_5.csv
#labels_df = np.zeros((nrow, 14400)) #People_All_6.csv Food_All_6.csv
#labels_df = np.zeros((nrow, 1296)) #People_All_7.csv Food_All_7.csv
#labels_df = np.zeros((nrow, 1296)) #People_All_8.csv Food_All_8.csv
feat = np.zeros((nrow, 900)) #People_All_9.csv Food_All_9.csv
count=0
for url in url_good_list:
read= urllib2.urlopen(url).read()
obj = Image.open( cStringIO.StringIO(read) )
img = np.array(obj.convert('L'))
#blocks = feature.hog(img, pixels_per_cell=(50, 50), cells_per_block=(3, 3), visualise=False, normalise=True) #People_Feature_7.csv Food_Feature_7.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(50,30), cells_per_block=(3,3), visualise=False, normalise=True) #People_Feature_8.csv Food_Feature_8.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(100,100), cells_per_block=(3,3), visualise=False, normalise=True) #People_Feature_9.csv Food_Feature_9.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(30,30), cells_per_block=(3,3), visualise=False, normalise=True) #People_Feature_10.csv Food_Feature_10.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(30,30), cells_per_block=(3,3), visualise=False, normalise=True) #People_All_1.csv Food_All_1.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(30,30), cells_per_block=(4,4), visualise=False, normalise=True) #People_All_2.csv Food_All_2.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(30,30), cells_per_block=(5,5), visualise=False, normalise=True) #People_All_3.csv Food_All_3.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(50,50), cells_per_block=(3,3), visualise=False, normalise=True) #People_All_4.csv Food_All_4.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(50,50), cells_per_block=(4,4), visualise=False, normalise=True) #People_All_5.csv Food_All_5.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(50,50), cells_per_block=(5,5), visualise=False, normalise=True) #People_All_6.csv Food_All_6.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(100,100), cells_per_block=(3,3), visualise=False, normalise=True) #People_All_7.csv Food_All_7.csv
#blocks = feature.hog(img, orientations=9, pixels_per_cell=(100,100), cells_per_block=(4,4), visualise=False, normalise=True) #People_All_8.csv Food_All_8.csv
blocks = feature.hog(img, orientations=9, pixels_per_cell=(100,100), cells_per_block=(5,5), visualise=False, normalise=True) #People_All_9.csv Food_All_9.csv
print "hogs done"
#labels_df.loc[len(labels_df.index)]=([blocks])
#if(len(blocks) == 29241): #People_Feature_10.csv Food_Feature_10.csv
#if(len(blocks) == 1296): #People_Feature_9.csv Food_Feature_9.csv
#if(len(blocks) == 15390): #People_Feature_8.csv Food_Feature_8.csv
#if(len(blocks) == 8100): #People_Feature_7.csv Food_Feature_7.csv
#if(len(blocks) == 29241): #People_All_1.csv Food_All_1.csv
#if(len(blocks) == 46656): #People_All_2.csv Food_All_2.csv
#if(len(blocks) == 65025): #People_All_3.csv Food_All_3.csv
#if(len(blocks) == 8100): #People_All_4.csv Food_All_4.csv
#if(len(blocks) == 11664): #People_All_5.csv Food_All_5.csv
#if(len(blocks) == 14400): #People_All_6.csv Food_All_6.csv
#if(len(blocks) == 1296): #People_All_7.csv Food_All_7.csv
#if(len(blocks) == 1296): #People_All_8.csv Food_All_8.csv
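# Keep only images whose HOG descriptor has the expected length (i.e. images
# of the size the feature matrix was allocated for); others are skipped.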
if(len(blocks) == 900): #People_All_9.csv Food_All_9.csv
feat[count] = blocks
count += 1
urls_df=pd.DataFrame()
urls_df["urls"]= url_good_list
feat_df= pd.DataFrame(feat)
final_df=pd.concat([urls_df,feat_df],axis=1)
name="csv_features/hog_features_9_NewTraining_SkinNoFaces_everyones.csv"
final_df.to_csv(name) | bsd-3-clause |
leonidk/centest | github_census/fit_noise.py | 1 | 5121 | #!/usr/bin/env python
import numpy as np
import random
import copy
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from scipy.optimize import minimize_scalar
import os
import sys
from util import load_psm
from scipy.ndimage.filters import convolve
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.filters import sobel
from scipy.ndimage.interpolation import rotate
from scipy.ndimage.interpolation import shift
from scipy.special import erf
well_capacity = 1000.0
read_noise = 120.0
sensor_depth = 1024.0
min_vals = 20
edge_pixels = 3
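# fit_ge fits a blurred step edge: an ideal step convolved with a Gaussian
# follows an erf profile, so the normalized span is fit to
# (erf((x + offset) / scale) + 1) / 2 and the fitted scale is returned as the
# edge-spread (blur sigma) estimate.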
def fit_ge(xspan,span):
if span[-1] < span[0]:
span = span[::-1]
#print xspan,[x for x in span]
span -= span.min()
span /= span.max()
opt= minimize(lambda p: sum([abs(float(yv)-(erf(float(xv+p[1])/p[0])+1.0)/2.0) for xv,yv in zip(xspan,span)]),[10,0],bounds=[[1e-6,None],[-1,1]])
#print opt
return opt.x[0]
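# fit_ge estimates the blur across an edge by fitting a Gaussian edge-spread
# model: the normalized edge profile is compared with (erf((x + shift)/sigma) + 1)/2
# and the best-fit sigma (opt.x[0]) is returned as the blur estimate.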
def plot_span(xspan,span,sigma):
if span[-1] < span[0]:
span = span[::-1]
span = np.array(span)
span -= span.min()
span /= span.max()
plt.plot(xspan,span,c='b')
    plt.plot(xspan,[(erf(float(x)/sigma)+1.0)/2.0 for x in xspan],c='r')
plt.show()
def fit_mtf(good_image):
if len(good_image.shape) == 3:
good_image = good_image.mean(axis=2)
blur_est = []
img = good_image
ye = sobel(img,axis=0)
xe = sobel(img,axis=1)
#e1 = convolve(img,np.array([[0,-1,0],[0,0,0],[0,1,0]]))
#e2 = convolve(img,np.array([[0,0,0],[-1,0,1],[0,0,0]]))
gs = np.sqrt(xe**2 + ye**2)
largest_edges = np.argsort(-gs.ravel())[:edge_pixels]
yi,xi = np.unravel_index(largest_edges,gs.shape)
for y,x in zip(yi,xi):
m = gs[y,x]
yx = ye[y,x]
xx = xe[y,x]
a = np.arctan2(yx,xx)
gr = rotate(img,a*180.0/3.14159,mode='nearest')
xer = sobel(gr,axis=1)
#e1 = convolve(img,np.array([[0,-1,0],[0,0,0],[0,1,0]]))
#e2 = convolve(img,np.array([[0,0,0],[-1,0,1],[0,0,0]]))
gsr = np.sqrt(xer**2)
ler = np.argsort(-gsr.ravel())[:1]
yir,xir = np.unravel_index(ler,gsr.shape)
for y2,x2 in zip(yir,xir):
cur = gr[y2,x2]
xp = 0.0
xm = 0.0
for plus in xrange(1,gr.shape[1]-x2):
diff = cur - gr[y2,x2+plus]
if abs(diff) > abs(xp):
xp = diff
else:
plus -=1
break
for minus in xrange(1,x2):
diff = cur - gr[y2,x2-minus]
if abs(diff) > abs(xm):
xm = diff
else:
minus -=1
break
xspan = range(-minus,plus+1)
span = gr[y2,x2-minus:x2+plus+1]
res = fit_ge(xspan,span)
blur_est.append(res)
return blur_est
#print m,a,y,x,a*180.0/3.14159,xx,yx
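# fit_mtf is a rough slanted-edge blur estimator: it picks the strongest Sobel
# edges, rotates the image by the local gradient angle so the edge can be
# scanned along a single axis, extracts the monotonic intensity span across the
# strongest rotated edge, and fits it with fit_ge to get a Gaussian sigma per edge.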
def add_noise(x):
x *= well_capacity
x = np.random.poisson(x).astype(np.float64)
x += np.random.standard_normal(x.shape)*read_noise
#x = x + np.random.poisson(np.ones(x.shape)*read_noise).astype(np.float64)
x /= well_capacity
return x
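# add_noise models a photon-counting sensor: the input (in well-capacity units)
# is scaled to electrons, Poisson shot noise and Gaussian read noise are added,
# and the result is scaled back.  Under this model
#     var(x) ~= mean(x)/well_capacity + (read_noise/well_capacity)**2,
# which is why the mean-variance fits below take 1/slope as the well capacity
# and sqrt(offset * well_capacity) as the noise term.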
def tr(x,n=0):
fft_of_signal = np.fft.fft(x)
if n > 0:
fft_of_signal[0:n] = 0
return np.real(np.fft.ifft(fft_of_signal))
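# tr is a crude high-pass filter: it zeroes the first n FFT bins (including the
# DC term) and keeps the real part of the inverse transform, removing the mean
# and the slowest variations from the signal.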
if len(sys.argv) == 1:
num_pixels = 10000
num_int = 1024
num_samples = 1024
a = np.random.rand(num_pixels)
r = np.empty(shape=(num_samples,num_pixels))
for i in xrange(num_samples):
n = add_noise(copy.copy(a))
r[i,:] = n
#x,y = cont_to_hist(n,num_int)
x = r.mean(axis=0)
y = r.var(axis=0)
opt= minimize(lambda p: sum([abs(yv-p[0]*(xv + p[1])) for xv,yv in zip(x,y)]), [0,0])
res = opt.x
print 1.0/res[0],np.sqrt(res[1]*(1.0/res[0]))
plt.scatter(x,y)
plt.show()
else:
target_dir = sys.argv[1]
imgs = []
for f in os.listdir(target_dir):
fl = os.path.join(target_dir,f)
img = load_psm(fl)
imgs.append(img[0].astype(np.float64))
imgs = np.array(imgs)
img = imgs.mean(axis=0)
img = convolve(img/sensor_depth,np.array([[1,2,1],[2,4,2],[1,2,1]])/16.0)
sigmas = fit_mtf(img)
print sigmas, sum(sigmas)/len(sigmas)
print 'Gaussian Sigma: {0:.2f}'.format(sigmas[0])
r = imgs.reshape((imgs.shape[0],-1))/sensor_depth
xo = r.mean(axis=0)
    # apply the FFT high-pass filter to each image in place
    for i in xrange(r.shape[0]):
        r[i, :] = tr(r[i, :], 5)
yo = r.var(axis=0)
d = defaultdict(list)
for x,y in zip(xo,yo):
d[round(x*sensor_depth)].append(y)
d2 = [(k,sum(v)/float(len(v))) for k,v in d.iteritems() if len(v) > min_vals]
x = np.array([t[0]/sensor_depth for t in d2])
y = np.array([t[1] for t in d2])
opt= minimize(lambda p: sum([abs(yv-p[0]*(xv + p[1])) for xv,yv in zip(x,y)]), [0,0])
res = opt.x
well_cap = 1.0/res[0]
sn = np.sqrt(max(res[1],0)*well_cap)
print 'Well Capacity: {0:.0f} \n Shot Noise: {1:.2f}'.format(well_cap,sn)
plt.scatter(x,y,s=2,lw=0)
plt.xlim(x.min(),x.max())
plt.ylim(y.min(),y.max())
plt.show()
| mpl-2.0 |
quheng/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead of a
    multiclass classification problem.
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
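# For example, with y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8],
# three of the four (positive, negative) score pairs are correctly ordered, so
# _auc returns 3 / 4 = 0.75, in agreement with roc_auc_score on the same input.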
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
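# For example, with y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8],
# the positives sit at ranks 1 and 3 of the score ordering, the precisions
# accumulated there are 1/1 and 2/3, and the returned score is
# (1 + 2/3) / 2 ~= 0.83.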
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input format is not appropriate
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that Label ranking average precision works for various cases:
    # basic check with increasing label space size and decreasing scores
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
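# The expected value used above follows from the strictly decreasing scores: the
# relevant labels occupy ranks pos+1 .. pos+n_relevant, the (r+1)-th of them has
# r+1 relevant labels at or above its rank pos+r+1, so it contributes a precision
# of (r+1)/(pos+r+1), and LRAP averages these contributions over the n_relevant
# relevant labels.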
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels with an equal or better
            # (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
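# For example, with y_true = [[1, 0, 0], [0, 0, 1]] and
# y_score = [[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]], the single relevant label of
# each sample is ranked 2nd and 3rd respectively, the per-sample scores are 1/2
# and 1/3, and _my_lrap returns (1/2 + 1/3) / 2 ~= 0.42.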
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 28 | 3792 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Got X with X.ndim=1',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Got X with X.ndim=1',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
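# launch_mcd_on_dataset builds Gaussian data contaminated with n_outliers shifted
# points, fits MinCovDet, and requires the robust location/covariance to stay
# within tol_loc/tol_cov of the estimates computed on the uncontaminated inliers,
# with a support of at least tol_support points.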
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
procoder317/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
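# The benchmark is driven from the command line, e.g.
#   python bench_20newsgroups.py -e logistic_regression naive_bayes
# runs only the selected entries of the ESTIMATORS dictionary above.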
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
mvfcopetti/pySSN | pyssn/qt/pyssn_qt.py | 1 | 219113 | """
This is the window manager part of pySSN
pySSN is available under the GNU licence, provided you cite the developers' names:
Ch. Morisset (Instituto de Astronomia, Universidad Nacional Autonoma de Mexico)
D. Pequignot (Meudon Observatory, France)
Inspired by demo code by:
Eli Bendersky ([email protected])
"""
import sys, os
from PyQt4 import QtCore, QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from pyssn import log_, __version__
from ..core.spectrum import spectrum
from ..utils.misc import get_parser
from collections import OrderedDict
from ..utils.physics import CST
log_.level = 4
#ToDo :
class NavigationToolbar( NavigationToolbar2QT ):
curs = QtCore.pyqtSignal(bool)
def __init__(self, canvas, parent ):
NavigationToolbar2QT.__init__(self,canvas,parent)
self.clearButtons=[]
# Search through existing buttons
# next use for placement of custom button
next=None
for c in self.findChildren(QtGui.QToolButton):
if next is None:
next=c
# Don't want to see subplots and customize
"""
if str(c.text()) in ('Subplots', 'Customize'):
c.defaultAction().setVisible(False)
continue
"""
# Need to keep track of pan and zoom buttons
# Also grab toggled event to clear checked status of picker button
if str(c.text()) in ('Pan','Zoom'):
c.toggled.connect(self.clearCurs)
self.clearButtons.append(c)
next=None
# create custom button
pm=QtGui.QPixmap(32,32)
pm.fill(QtGui.QApplication.palette().color(QtGui.QPalette.Normal,QtGui.QPalette.Button))
painter=QtGui.QPainter(pm)
painter.fillRect(6,6,20,20,QtCore.Qt.red)
painter.fillRect(15,3,3,26,QtCore.Qt.blue)
painter.fillRect(3,15,26,3,QtCore.Qt.blue)
painter.end()
icon=QtGui.QIcon(pm)
ac = self.addAction(icon, "Toggle Curs")
ac.setCheckable(True)
        # TODO: decide how to initialize the checked state
#ac.setChecked(True)
ac.toggled.connect(self.curs_toggle)
self.ac = ac
#button=QtGui.QToolButton(self)
#button.setDefaultAction(self.ac)
# Add it to the toolbar, and connect up event
#self.insertWidget(next.defaultAction(),button)
# Grab the picked event from the canvas
canvas.mpl_connect('pick_event',self.canvasPicked)
def clearCurs(self, checked):
if checked:
self.ac.setChecked(False)
def curs_toggle(self, checked):
self.curs.emit(checked)
def canvasPicked(self, event):
if self.ac.isChecked():
self.curs.emit(event.ind)
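# NavigationToolbar extends the standard matplotlib Qt toolbar with a custom
# crosshair toggle button; its state changes (and canvas pick events while it is
# checked) are re-emitted through the `curs` signal, which AppForm connects to
# its cursor handling.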
class AppForm(QtGui.QMainWindow):
def __init__(self, parent=None, init_filename=None, post_proc_file=None, use_workspace=False):
self.calling = 'pySSN GUI'
self.use_workspace = use_workspace
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('pySSN')
self.sp = None
self.axes = None
self.axes2 = None
self.axes3 = None
self.fig = None
self.init_file_name = init_filename
self.init_line_num = None
self.init_ion = None
self.init_xmin = None
self.init_xmax = None
self.init_y1min = None
self.init_y1max = None
self.init_y3min = None
self.init_y3max = None
self.init_legend_fontsize = None
self.init_legend_loc = None
self.init_nearby_line_num = None
self.init_nearby_ion = None
self.init_nearby_xmin = None
self.init_nearby_xmax = None
self.init_nearby_y1min = None
self.init_nearby_y1max = None
self.init_nearby_y3min = None
self.init_nearby_y3max = None
self.init_nearby_legend_fontsize = None
self.init_nearby_legend_loc = None
self.init_cont_line_num = None
self.init_cont_ion = None
self.init_cont_xmin = None
self.init_cont_xmax = None
self.init_cont_y1min = None
self.init_cont_y1max = None
self.init_cont_y3min = None
self.init_cont_y3max = None
self.init_cont_legend_fontsize = None
self.init_cont_legend_loc = None
self.call_on_draw = True
self.cursor_on = False
self.line_info_ref = 0
self.x_plot_lims = None
self.y1_plot_lims = None
self.y2_plot_lims = None
self.y3_plot_lims = None
self.xscale = None
self.yscale = None
self.post_proc_file = post_proc_file
self.tick_file = None
self.save_parameters_file = None
self.do_save = True
self.cont_par_changed = False
self.axes_fixed = False
self.showErrorBox = True
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.exec_init()
self.cont_pars_dialog = None
self.cursor_w1 = None
self.cursor_w2 = None
self.nearbyLines = None
self.nearbyLines_sort_by = 'i_tot'
self.nearbyLines_sort_reverse = True
self.nearbyLines_dialog = None
self.nearbyLines_selected_ions = None
self.line_info_dialog = None
self.instr_prof_dialog = None
self.refine_wave_dialog = None
self.refine_wave_as_table = False
self.interpol_cont_dialog = None
self.interpol_cont_as_table = False
self.fig_prof = None
self.green_tick_shown = False
self.magenta_tick_shown = False
self.addGreenTickToLegend = True
self.show_true_ions = False
self.nearbyDialogFilterIsActive = False
self.get_user_cont_points = False
self.del_user_cont_points = False
self.user_cont_editBox = None
self.showHelpBrowser = False
def closeEvent(self, evnt):
if self.sp.get_conf('save_parameters_on_exit'):
self.save_pars_as()
if self.cont_pars_dialog is not None:
self.cont_pars_dialog.close()
if self.nearbyLines_dialog is not None:
self.nearbyLines_dialog.close()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
if self.instr_prof_dialog is not None:
self.instr_prof_dialog.close()
if self.refine_wave_dialog is not None:
self.refine_wave_dialog.close()
if self.interpol_cont_dialog is not None:
self.interpol_cont_dialog.close()
def image_extension_list(self):
filetypes = self.canvas.get_supported_filetypes()
file_extensions = filetypes.keys()
file_extensions.sort()
return file_extensions
def image_filter(self, fileExt=''):
filetypes = self.canvas.get_supported_filetypes_grouped()
imagetype_list = filetypes.keys()
imagetype_list.sort()
s = ''
k = 0
for imagetype in imagetype_list:
extension_list = filetypes[ imagetype ]
if fileExt in extension_list:
k = imagetype_list.index(imagetype)
s = s + str(imagetype)
s1 = ' (*.' + str(extension_list[0])
for extension in extension_list[1:]:
s1 = s1 + ' *.' + str(extension)
s1 = s1 + ')'
s = s + s1 + s1 + ';;'
filter_str = s[:-2]
selectedFilter = s.split(';;')[k]
return filter_str, selectedFilter
def save_plot(self):
path = self.sp.get_conf('plot_filename')
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
def save_plot_as(self):
path = self.sp.get_conf('plot_filename')
extension = os.path.splitext(path)[1][1:].lower()
file_choices, selectedFilter = self.image_filter(extension)
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save plot to file', path, file_choices, selectedFilter))
if path:
extension = os.path.splitext(path)[1][1:].lower()
if extension in self.image_extension_list():
self.sp.set_conf('plot_filename', path)
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Plot saved to file %s' % path, 2000)
else:
title = 'Error saving plot'
msg = 'Format "{0}" not supported.'.format(extension)
msg = msg + '\nSupported formats: '
extension_list = self.image_extension_list()
n = len(extension_list)-1
s = ''
for i in range(0,n):
s = s + extension_list[i] + ', '
s = s + extension_list[n] + '.'
msg = msg + s
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def on_about(self):
msg = """ pySSN (Spectral Synthesis for Nebulae):
"""
QtGui.QMessageBox.about(self, "About the demo", msg.strip())
def set_cursor(self, checked):
self.cursor_on = checked
self.sp.firstClick = True
def on_click(self, event):
if self.get_user_cont_points and self.user_cont_editBox is not None:
wave = event.xdata
i_list = [i for i in range(len(self.sp.w)-1) if self.sp.w[i] <= wave <= self.sp.w[i+1] or self.sp.w[i+1] <= wave <= self.sp.w[i]]
if len(i_list) == 1:
i = i_list[0]
c = self.sp.cont[i] - self.sp.conts['user'][i]
self.user_cont_editBox.append('{:<7.1f} {:.2f}'.format(event.xdata, event.ydata-c))
self.update_user_cont()
elif ( self.del_user_cont_points and
self.user_cont_editBox is not None and
self.sp.get_conf('cont_user_table') is not None ):
wave = event.xdata
points = self.sp.get_conf('cont_user_table')[:]
if points is not None and len(points) > 0:
points.remove(min(points, key=lambda x:abs(x[0]-wave)))
self.user_cont_list2table(points)
self.update_user_cont()
elif self.cursor_on:
do_print = not self.sp.get_conf('qt_show_dialogs', True)
nearbyLines = self.sp.nearby_lines(event, do_print, sort='i_tot', reverse=True)
if nearbyLines is None:
return
self.nearbyLines = nearbyLines
if not do_print:
self.show_nearbyLines_dialog()
def sort_nearbyLines(self, sort, reverse=False):
if self.nearbyLines is None:
return
if sort == 'proc':
sorts = np.argsort([ self.sp.process[str(line_num)[-9]] for line_num in self.nearbyLines['num'] ])
else:
sorts = np.argsort(self.nearbyLines[sort])
if reverse:
sorts = sorts[::-1]
self.nearbyLines = np.array(self.nearbyLines)[sorts]
def create_main_frame(self):
if self.use_workspace:
self.main_frame = QtGui.QWorkspace()
else:
self.main_frame = QtGui.QWidget()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 100
#self.fig = plt.figure(figsize=(15,15))
self.fig = plt.figure(figsize=(15,15))
# self.fig = plt.figure(figsize=(20.0, 15.0), dpi=self.dpi)
log_.debug('creating figure {}'.format(id(self.fig)), calling=self.calling)
self.canvas = FigureCanvas(self.fig)
if self.use_workspace:
self.main_frame.addWindow(self.canvas)
self.fig2 = Figure((20.0, 15.0), dpi=self.dpi)
self.canvas2 = FigureCanvas(self.fig2)
#self.main_frame.addWindow(self.canvas2)
else:
self.canvas.setParent(self.main_frame)
self.canvas.mpl_connect('button_press_event', self.on_click)
self.canvas.mpl_connect('figure_leave_event', self.leave_fig)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
self.mpl_toolbar.curs.connect(self.set_cursor)
# Other GUI controls
#
self.fix_axes_cb = QtGui.QCheckBox("fix")
self.fix_axes_cb.setChecked(False)
self.connect(self.fix_axes_cb, QtCore.SIGNAL('stateChanged(int)'), self.fix_axes)
self.xlim_min_box = QtGui.QLineEdit()
self.xlim_min_box.setMinimumWidth(50)
#self.connect(self.xlim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_min)
self.connect(self.xlim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.xlim_max_box = QtGui.QLineEdit()
self.xlim_max_box.setMinimumWidth(50)
#self.connect(self.xlim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_xlim_max)
#self.xlim_max_box.editingFinished.connect(self.validate_xlim_max)
self.connect(self.xlim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_min_box = QtGui.QLineEdit()
self.y1lim_min_box.setMinimumWidth(50)
#self.connect(self.y1lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_min)
self.connect(self.y1lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y1lim_max_box = QtGui.QLineEdit()
self.y1lim_max_box.setMinimumWidth(50)
#self.connect(self.y1lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y1lim_max)
self.connect(self.y1lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_min_box = QtGui.QLineEdit()
self.y3lim_min_box.setMinimumWidth(50)
#self.connect(self.y3lim_min_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_min)
self.connect(self.y3lim_min_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.y3lim_max_box = QtGui.QLineEdit()
self.y3lim_max_box.setMinimumWidth(50)
#self.connect(self.y3lim_max_box, QtCore.SIGNAL('editingFinished()'), self.validate_y3lim_max)
self.connect(self.y3lim_max_box, QtCore.SIGNAL('returnPressed()'), self.set_plot_limits_and_draw)
self.run_button = QtGui.QPushButton("Run")
self.connect(self.run_button, QtCore.SIGNAL('clicked()'), self.rerun)
self.draw_button = QtGui.QPushButton("Draw")
self.connect(self.draw_button, QtCore.SIGNAL('clicked()'), self.on_draw)
self.Command_GroupBox = QtGui.QGroupBox("Execute")
self.Command_GroupBox.setCheckable(False)
self.ObsSpec_GroupBox = QtGui.QGroupBox("Parameters of the synthetic spectrum")
self.ObsSpec_GroupBox.setCheckable(False)
self.SpecPlot_GroupBox = QtGui.QGroupBox("Plot of spectra")
self.SpecPlot_GroupBox.setCheckable(False)
self.lineIDs_GroupBox = QtGui.QGroupBox("Show lines")
self.lineIDs_GroupBox.setCheckable(True)
self.lineIDs_GroupBox.setChecked(True)
self.connect(self.lineIDs_GroupBox, QtCore.SIGNAL('clicked()'), self.show_lines_clicked)
self.lineIDs_GroupBox_ToolTip = 'Check to show ticks at the central positions of the spectral lines and plot the lines of selected ions'
self.residual_GroupBox = QtGui.QGroupBox("Plot of residuals")
self.residual_GroupBox.setCheckable(True)
self.residual_GroupBox.setChecked(True)
self.connect(self.residual_GroupBox, QtCore.SIGNAL('clicked()'), self.residual_box_clicked)
self.residual_GroupBox_ToolTip = 'Check to display the residual plot'
self.adjust_button = QtGui.QPushButton("Update")
self.adjust_button.setChecked(False)
self.connect(self.adjust_button, QtCore.SIGNAL('clicked()'), self.adjust)
self.post_proc_button = QtGui.QPushButton("Post proc")
self.post_proc_button.setChecked(False)
self.connect(self.post_proc_button, QtCore.SIGNAL('clicked()'), self.apply_post_proc)
self.update_profile_button = QtGui.QPushButton("Update profiles")
self.update_profile_button.setChecked(False)
self.connect(self.update_profile_button, QtCore.SIGNAL('clicked()'), self.update_profile)
self.sp_min_box = QtGui.QLineEdit()
self.sp_min_box.setMinimumWidth(50)
#self.connect(self.sp_min_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_min_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_max_box = QtGui.QLineEdit()
self.sp_max_box.setMinimumWidth(50)
#self.connect(self.sp_max_box, QtCore.SIGNAL('editingFinished()'), self.set_limit_sp)
self.connect(self.sp_max_box, QtCore.SIGNAL('returnPressed()'), self.set_limit_sp_and_run)
self.sp_norm_box = QtGui.QLineEdit()
self.sp_norm_box.setMinimumWidth(50)
self.connect(self.sp_norm_box, QtCore.SIGNAL('returnPressed()'), self.sp_norm)
self.obj_velo_box = QtGui.QLineEdit()
self.obj_velo_box.setMinimumWidth(50)
self.connect(self.obj_velo_box, QtCore.SIGNAL('returnPressed()'), self.obj_velo)
self.ebv_box = QtGui.QLineEdit()
self.ebv_box.setMinimumWidth(50)
self.connect(self.ebv_box, QtCore.SIGNAL('returnPressed()'), self.ebv)
self.resol_box = QtGui.QLineEdit()
self.resol_box.setMinimumWidth(50)
self.connect(self.resol_box, QtCore.SIGNAL('returnPressed()'), self.resol)
self.cut2_box = QtGui.QLineEdit()
self.cut2_box.setMinimumWidth(50)
self.connect(self.cut2_box, QtCore.SIGNAL('returnPressed()'), self.cut2)
self.cut_cb = QtGui.QCheckBox('')
self.cut_cb.setChecked(False)
self.connect(self.cut_cb, QtCore.SIGNAL('clicked()'), self.cut_cb_changed)
self.ion_box = QtGui.QLineEdit()
self.ion_box.setMinimumWidth(70)
self.connect(self.ion_box, QtCore.SIGNAL('returnPressed()'), self.draw_ion)
self.ion_cb = QtGui.QCheckBox('')
self.ion_cb.setChecked(False)
self.connect(self.ion_cb, QtCore.SIGNAL('clicked()'), self.ion_cb_changed)
self.line_info_box = QtGui.QLineEdit()
self.line_info_box.setFixedWidth(130)
self.connect(self.line_info_box, QtCore.SIGNAL('returnPressed()'), self.line_info)
self.mpl_toolbar.addSeparator()
self.mpl_toolbar.addWidget(QtGui.QLabel(' line number '))
self.mpl_toolbar.addWidget(self.line_info_box)
self.magenta_box = QtGui.QLineEdit()
self.magenta_box.setMinimumWidth(50)
self.connect(self.magenta_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.magenta_label_box = QtGui.QLineEdit()
self.magenta_label_box.setMinimumWidth(50)
self.connect(self.magenta_label_box, QtCore.SIGNAL('returnPressed()'), self.magenta_line)
self.cyan_box = QtGui.QLineEdit()
self.cyan_box.setMinimumWidth(50)
self.connect(self.cyan_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.cyan_label_box = QtGui.QLineEdit()
self.cyan_label_box.setMinimumWidth(50)
self.connect(self.cyan_label_box, QtCore.SIGNAL('returnPressed()'), self.cyan_line)
self.setStyleSheet("""QToolTip {
background-color: black;
color: lightgray;
min-width: 20em;
font-size: 14px;
font-family: "sans-serif";
border: black solid 10px
}""")
s = 'Click to execute the synthesis from the beginning.'
self.run_button_ToolTip = s
s = 'Click to update synthesis with changes in line intensities, profiles, and continuum parameters.'
self.adjust_button_ToolTip = s
s = 'Enter line number to get information on\n' \
'the reference line and on its satellites.'
self.line_info_box_ToolTip = s
s = 'Color excess E(B-V)\n\n' \
'Set with: \n' \
' e_bv = <float>\n\n' \
'Comment: \n' \
u' E(B-V) \u2248 C(H\u03B2) / 1.5'
self.ebv_box_ToolTip = s
s = 'Radial velocity in km/s\n\n' \
'Set with: \n' \
' obj_velo = <float>'
self.obj_velo_box_ToolTip = s
s = 'Minimum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_min_box_ToolTip = s
s = 'Maximum wavelength of the synthetic spectrum (in angstroms)\n\n' \
'Set with: \n' \
' limit_sp = (<xmin>, <xmax>)'
self.sp_max_box_ToolTip = s
s = 'Minimum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_min_box_ToolTip = s
s = 'Maximum wavelength in the plots of spectra and residuals (in angstroms)\n\n' \
'Set with: \n' \
' x_plot_lims = (<xmin>, <xmax>)'
self.xlim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of spectra, in units of relative intensity \n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of spectra, in units of relative intensity\n\n' \
'Set with: \n' \
' y1_plot_lims = (<ymin>, <ymax>)'
self.y1lim_max_box_ToolTip = s
s = 'Minimum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_min_box_ToolTip = s
s = 'Maximum ordinate in the plot of residuals, in units of relative intensity\n\n' \
'Set with: \n' \
' y3_plot_lims = (<ymin>, <ymax>)'
self.y3lim_max_box_ToolTip = s
s = 'Check to retain the current limits of the plots while zooming and panning.'
self.fix_axes_cb_ToolTip = s
s = 'Check to show only lines with intensities above cut. \n\n' \
'Set with: \n' \
' show_selected_intensities_only = <boolean>'
self.cut_cb_ToolTip = s
s = 'Check to show only lines of selected ions. \n\n' \
'Set with: \n' \
' show_selected_ions_only = <boolean>'
self.ion_cb_ToolTip = s
s = 'Normalization factor, ratio between the intensity and the \n' \
u'observed flux of the reference line, usually 10\u2074/F(H\u03B2)\n\n' \
'Set with: \n' \
' sp_norm = <float>'
self.sp_norm_box_ToolTip = s
s = 'Rebinning factor, the odd integer factor by which the number of points \n' \
'of the original spectrum is multiplied in the rebinning process\n\n' \
'Set with: \n' \
' resol = <integer>\n\n' \
'Usage: \n' \
' Set to \'1\' if the resolution of the observed spectrum is large enough'
self.resol_box_ToolTip = s
s = 'Minimum relative intensity of lines to be shown. \n\n' \
'Set with: \n' \
' cut_plot2 = <float>'
self.cut2_box_ToolTip = s
s = 'Comma-separated list of selected ions, elements, or line numbers to be shown. \n\n' \
'Set with: \n' \
' selected_ions = [<ion1>,<ion2>,...]\n\n' \
'Examples: \n' \
' \'O III\' (or \'O_III\') to show the lines of O III\n' \
' \'O III*\' (or \'O_III*\') to show the lines of O III, O IIIfl, O III5g, etc\n' \
' \'O III, O IV\' to show the lines of O III and O IV\n' \
' \'O\' to show the lines of all O ions\n' \
' \'Fe, N\' to show the lines of all Fe and N ions\n' \
' <line number> to show the lines of that same ion'
self.ion_box_ToolTip = s
#
# Layout with box sizers
#
CommandLayout = QtGui.QGridLayout()
wList = [self.run_button,self.adjust_button]
Nrow = 2
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
CommandLayout.addWidget(w,i,j)
CommandLayout.setAlignment(w,QtCore.Qt.AlignCenter)
self.Command_GroupBox.setLayout(CommandLayout)
ObsSpecLayout = QtGui.QGridLayout()
lList = ['xmin', 'xmax', u'10\u2074/F(H\u03B2)', 'radial vel.', 'E(B-V)', 'N']
wList = [self.sp_min_box, self.sp_max_box, self.sp_norm_box, self.obj_velo_box, self.ebv_box, self.resol_box ]
Nrow = 2
for l in lList:
w = QtGui.QLabel(l)
k = lList.index( l )
i = k%Nrow
j = 2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
for w in wList:
k = wList.index( w )
i = k%Nrow
j = 1+2*(k/Nrow)
ObsSpecLayout.addWidget(w,i,j)
ObsSpecLayout.setAlignment(w,QtCore.Qt.AlignRight)
self.ObsSpec_GroupBox.setLayout(ObsSpecLayout)
SpecPlotLayout = QtGui.QGridLayout()
SpecPlotLayout.addWidget(QtGui.QLabel('xmin'),0,0)
SpecPlotLayout.addWidget(QtGui.QLabel('xmax'),1,0)
SpecPlotLayout.addWidget(QtGui.QLabel('ymin'),0,2)
SpecPlotLayout.addWidget(QtGui.QLabel('ymax'),1,2)
SpecPlotLayout.addWidget(self.xlim_min_box,0,1)
SpecPlotLayout.addWidget(self.xlim_max_box,1,1)
SpecPlotLayout.addWidget(self.y1lim_min_box,0,3)
SpecPlotLayout.addWidget(self.y1lim_max_box,1,3)
SpecPlotLayout.addWidget(self.fix_axes_cb,0,4)
self.SpecPlot_GroupBox.setLayout(SpecPlotLayout)
LineIDLayout = QtGui.QGridLayout()
LineIDLayout.addWidget(QtGui.QLabel('cut'),0,0)
LineIDLayout.addWidget(self.cut2_box,0,1)
LineIDLayout.addWidget(self.cut_cb,0,2)
LineIDLayout.addWidget(QtGui.QLabel('ion'),1,0)
LineIDLayout.addWidget(self.ion_box,1,1)
LineIDLayout.addWidget(self.ion_cb,1,2)
self.lineIDs_GroupBox.setLayout(LineIDLayout)
ResidualLayout = QtGui.QGridLayout()
ResidualLayout.addWidget(QtGui.QLabel('ymin'),0,0)
ResidualLayout.addWidget(QtGui.QLabel('ymax'),1,0)
ResidualLayout.addWidget(self.y3lim_min_box,0,1)
ResidualLayout.addWidget(self.y3lim_max_box,1,1)
self.residual_GroupBox.setLayout(ResidualLayout)
grid = QtGui.QGridLayout()
grid.addWidget(self.Command_GroupBox, 0, 1 )
grid.addWidget(self.ObsSpec_GroupBox, 0, 2 )
grid.addWidget(self.SpecPlot_GroupBox, 0, 3 )
grid.addWidget(self.residual_GroupBox, 0, 4 )
grid.addWidget(self.lineIDs_GroupBox, 0, 5 )
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(grid)
#vbox.setAlignment(QtCore.Qt.AlignBottom)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QtGui.QLabel("pySSN, v{}".format(__version__))
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("File")
open_init_action = self.create_action("Open init file",
shortcut="",
slot=self.select_init,
tip="Open the initialization file and run the synthesis")
save_pars_action = self.create_action("Save parameters",
shortcut="Ctrl+S",
slot=self.save_pars_as,
tip="Save synthesis and plot parameters to file")
save_pars_as_action = self.create_action("Save parameters as",
shortcut="Ctrl+Shift+S",
slot=self.save_pars_as,
tip="Select file name and save parameters of the synthesis")
self.save_plot_action = self.create_action("Save plot",
shortcut="Ctrl+P",
slot=self.save_plot_as,
tip="Save plot to file")
save_plot_as_action = self.create_action("Save plot as",
shortcut="Ctrl+Shift+P",
slot=self.save_plot_as,
tip="Select file name and save plot")
save_lines_action = self.create_action("Save lines",
shortcut="Ctrl+L",
slot=self.save_lines_as,
tip="Save list of lines to file")
save_lines_as_action = self.create_action("Save lines as",
shortcut="Ctrl+Shift+L",
slot=self.save_lines_as,
tip="Select file name and save list of lines")
self.add_actions(self.file_menu,
(open_init_action, save_pars_action, None, self.save_plot_action, None, save_lines_action))
#(open_init_action, save_pars_action, save_pars_as_action, None, self.save_plot_action, save_plot_as_action, None, save_lines_action, save_lines_as_action))
self.line_sort_list = ['wavelength', 'decreasing wavelength', 'intensity', 'decreasing intensity', 'ion' , 'decreasing ion' ]
s = 'Sort lines by:\n'
for i in range(len(self.line_sort_list)):
s = s + ' ' + str(i) + ' - ' + self.line_sort_list[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_sort = <integer>'
self.line_sort_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_sort_menu = self.file_menu.addMenu("Sort lines by")
self.line_sort_menu_ToolTip = ''
for i in range(len(self.line_sort_list)):
a = self.line_sort_ag.addAction(QtGui.QAction(self.line_sort_list[i], self, checkable=True))
self.line_sort_menu.addAction(a)
self.line_sort_ag.triggered.connect(self.line_sort)
self.line_print_dic = OrderedDict( [
( 'num' , 'line number' ),
( 'id' , 'ion' ),
( 'lambda' , 'wavelength' ),
( 'l_shift' , 'wavelength shift' ),
( 'l_tot' , 'corrected wavelength' ),
( 'i_rel' , 'intensity' ),
( 'i_cor' , 'intensity correction factor' ),
( 'i_tot' , 'corrected intensity' ),
( 'ref' , 'reference line number' ),
( 'profile' , 'line profile code number' ),
( 'vitesse' , 'natural line width' ),
( 'comment' , 'comment' ) ])
items = list(self.line_print_dic.values())
s = 'Fields to be printed:\n'
for i in range(len(items)):
s = s + ' ' + str(i) + ' - ' + items[i] + '\n'
s = s + '\nSet with:\n' + ' save_lines_fields = <list>'
self.line_field_menu = self.file_menu.addMenu("Show fields")
self.line_field_menu_ToolTip = ''
for i in range(len(items)):
a = self.create_action(items[i],
shortcut='', slot=self.set_line_fields_to_print, checkable=True,
tip=None)
self.line_field_menu.addAction(a)
self.file_menu.addMenu(self.line_field_menu)
self.show_header_action = self.create_action("Show header",
slot=self.set_show_header,
shortcut="",
checkable=True,
tip="Show header in list of lines")
self.file_menu.addAction(self.show_header_action)
self.open_cosmetic_file_action = self.create_action("Open cosmetic file",
slot=self.set_cosmetic_file,
shortcut="",
tip="Open the cosmetic file")
self.clean_cosmetic_file_action = self.create_action("Clean cosmetic file",
slot=self.clean_cosmetic_file,
shortcut="",
tip="Remove the unchanged lines from the cosmetic file")
self.empty_cosmetic_file_action = self.create_action("Empty cosmetic file",
slot=self.empty_cosmetic_file,
shortcut="",
tip="Remove all lines from the cosmetic file")
self.order_cosmetic_file_action = self.create_action("Order cosmetic file",
slot=self.order_cosmetic_file,
shortcut="",
tip="Order the cosmetic file by line number and remove duplicate lines")
quit_action = self.create_action("&Quit",
slot=self.fileQuit,
shortcut="Ctrl+Q",
tip="Close the application")
self.add_actions(self.file_menu, (None, self.open_cosmetic_file_action, self.clean_cosmetic_file_action,
self.order_cosmetic_file_action, self.empty_cosmetic_file_action, None, quit_action))
self.run_menu = self.menuBar().addMenu("Execute")
run_action = self.create_action("Run",
shortcut="Ctrl+F9",
slot=self.rerun,
tip="Execute synthesis from the beginning")
update_action = self.create_action("Update",
shortcut="F9",
slot=self.adjust,
tip="Update synthesis with changes in line intensities, profiles, and continuum parameters")
draw_action = self.create_action("Draw",
shortcut="F8",
slot=self.set_plot_limits_and_draw,
tip="Redraw plots")
post_proc_action = self.create_action("Post-process",
shortcut="Ctrl+F8",
slot=self.apply_post_proc,
tip="Edit the plots with python commands defined in an external file")
open_profile_action = self.create_action("Instrumental profile",
shortcut="F7",
slot=self.apply_instr_prof,
tip="Open the instrumental profile file and run the synthesis")
refine_wavelengths_action = self.create_action("Wavelength-refining",
slot=self.refine_wavelengths,
shortcut="F6",
tip="Refine the wavelength calibration")
self.add_actions(self.run_menu, (update_action, run_action, draw_action, None,
post_proc_action, open_profile_action, refine_wavelengths_action))
self.line_menu = self.menuBar().addMenu('Lines')
self.show_line_ticks_action = self.create_action('Plot line ticks',
shortcut='Alt+L', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to show line ticks')
self.plot_lines_action = self.create_action('Plot spectra of selected ions',
shortcut='Alt+P', slot=self.show_line_ticks_action_clicked, checkable=True,
tip='Check to plot spectra of selected ions')
self.selected_intensities_action = self.create_action('Only above the cut',
shortcut='Alt+K', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the ticks for lines with intensities above cut only')
self.selected_ions_action = self.create_action('Only for selected ions',
shortcut='Alt+I', slot=self.selected_lines_clicked, checkable=True,
tip='Check to show the line ticks for selected ions only')
self.add_actions(self.line_menu,
(self.plot_lines_action, None, self.show_line_ticks_action, self.selected_intensities_action, self.selected_ions_action))
self.diff_lines_list = ['ion and reference line', 'ion and process', 'ion', 'element' ]
s = 'Differentiate lines by:\n'
for i in range(len(self.diff_lines_list)):
s = s + ' ' + str(i) + ' - ' + self.diff_lines_list[i] + '\n'
s = s + '\nSet with:\n' + ' diff_lines_by = <integer>'
self.diff_lines_ag = QtGui.QActionGroup(self, exclusive=True)
self.diff_lines_menu = self.line_menu.addMenu("Differentiate lines by")
self.diff_lines_menu_ToolTip = ''
for i in range(len(self.diff_lines_list)):
a = self.diff_lines_ag.addAction(QtGui.QAction(self.diff_lines_list[i], self, checkable=True))
a.setShortcut('Alt+' + str(i+1))
self.diff_lines_menu.addAction(a)
self.diff_lines_ag.triggered.connect(self.diff_lines)
self.cycle_forwards_ions_action = self.create_action('Cycle forwards selected ions',
shortcut='Alt+0', slot=self.cycle_forwards_ions, checkable=False,
tip='Click to cycle forwards the selected ions')
self.cycle_backwards_ions = self.create_action('Cycle backwards selected ions',
shortcut='Alt+9', slot=self.cycle_backwards_ions, checkable=False,
tip='Click to cycle backwards the selected ions')
self.add_actions(self.line_menu,
(None, self.cycle_forwards_ions_action, self.cycle_backwards_ions, None))
self.line_tick_ax_menu = self.line_menu.addMenu('Window of line ticks')
self.line_tick_ax_list = ['Plot of spectra', 'Plot of residuals', 'Separate plot' ]
s = 'Show line ticks on:\n'
for i in range(len(self.line_tick_ax_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_ax_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_ax = <integer>'
self.line_tick_ax_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_ax_menu_ToolTip = ''
for i in range(len(self.line_tick_ax_list)):
a = self.line_tick_ax_ag.addAction(QtGui.QAction(self.line_tick_ax_list[i], self, checkable=True))
self.line_tick_ax_menu.addAction(a)
self.line_tick_ax_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_pos_menu = self.line_menu.addMenu('Position of line ticks')
self.line_tick_pos_list = ['Top', 'Middle', 'Bottom' ]
s = 'Position line ticks:\n'
for i in range(len(self.line_tick_pos_list)):
s = s + ' ' + str(i) + ' - ' + self.line_tick_pos_list[i] + '\n'
s = s + '\nSet with:\n' + ' line_tick_pos = <integer>'
self.line_tick_pos_ag = QtGui.QActionGroup(self, exclusive=True)
self.line_tick_pos_menu_ToolTip = ''
for i in range(len(self.line_tick_pos_list)):
a = self.line_tick_pos_ag.addAction(QtGui.QAction(self.line_tick_pos_list[i], self, checkable=True))
self.line_tick_pos_menu.addAction(a)
self.line_tick_pos_ag.triggered.connect(self.set_plot_ax2)
self.line_tick_color_action = self.create_action('Color of line ticks',
shortcut=None, slot=self.line_tick_color_clicked, checkable=False,
tip='Set color of line ticks')
self.toggle_legend_action = self.create_action('Toggle legend position and zoom',
shortcut='Alt+Shift+L', slot=self.toggle_legend_clicked, checkable=False,
tip='Toggle the legend position and zoom')
self.line_menu.addAction(self.toggle_legend_action)
self.editing_lines_action = self.create_action('Allow editing line parameters',
slot=self.editing_lines_clicked, checkable=True,
tip='Check to allow editing line parameters in line info dialog')
self.update_lines_action = self.create_action('Update after editing line parameters',
shortcut='Alt+U', slot=self.update_lines_clicked, checkable=True,
tip='Check to update synthesis after editing line parameters in line info dialog')
self.show_line_ticks_from_file_action = self.create_action('Plot line ticks from file',
shortcut='F4', slot=self.show_line_ticks_from_file,
tip='Check to show line ticks defined in an external file')
self.ask_tickfile_action = self.create_action("Ask for file name",
checkable=True, tip="Check to always be asked for the text file containing the list of wavelengths to be ticked")
self.add_actions(self.line_menu, (None, self.show_line_ticks_from_file_action))
self.cont_menu = self.menuBar().addMenu('Continuum')
self.plot_cont_action = self.create_action('Plot continuum',
shortcut="Alt+C",
slot=self.plot_cont_action_clicked,
checkable=True,
tip='Check to plot the different components of the continuum spectrum')
self.cont_action = self.create_action('Parameters',
shortcut="Shift+Alt+C",
slot=self.cont_dialog,
tip='Parameters of the continuum spectrum')
self.interpol_cont_action = self.create_action('User-defined continuum',
shortcut="F5",
slot=self.user_continuum,
tip='Open dialog to set the user-defined continuum spectrum')
self.add_actions(self.cont_menu,
(self.plot_cont_action, self.cont_action, self.interpol_cont_action,))
self.settings_menu = self.menuBar().addMenu('Settings')
self.verbosity_list = ['None', 'Errors', 'Errors and warnings', 'Errors, warnings, and comments', 'Debug messages' ]
s = 'Verbosity level:\n'
for i in range(len(self.verbosity_list)):
s = s + ' ' + str(i) + ' - ' + self.verbosity_list[i] + '\n'
s = s + '\nSet with:\n' + ' log_level = <integer>'
self.verbosity_ag = QtGui.QActionGroup(self, exclusive=True)
#self.verbosity_menu = self.menuBar().addMenu("Verbosity")
self.verbosity_menu = self.settings_menu.addMenu("Verbosity")
self.verbosity_menu_ToolTip = ''
for i in range(len(self.verbosity_list)):
a = self.verbosity_ag.addAction(QtGui.QAction(self.verbosity_list[i], self, checkable=True))
self.verbosity_menu.addAction(a)
self.verbosity_ag.triggered.connect(self.verbosity)
self.style_list = list(QtGui.QStyleFactory.keys())
s = 'Widget styles:\n'
for i in range(len(self.style_list)):
s = s + ' ' + str(i) + ' - ' + self.style_list[i] + '\n'
s = s + '\nSet with:\n' + ' qt_style = <integer>'
self.style_ag = QtGui.QActionGroup(self, exclusive=True)
self.style_menu = self.settings_menu.addMenu('Widget style')
self.style_menu_ToolTip = ''
for i in range(len(self.style_list)):
a = self.style_ag.addAction(QtGui.QAction(self.style_list[i], self, checkable=True))
self.style_menu.addAction(a)
self.style_ag.triggered.connect(self.style)
self.enable_tooltips_action = self.create_action('Enable tooltips',
slot=self.enable_tooltips_action_clicked, checkable=True,
tip='Check to enable tooltips')
self.adjust_fig_action = self.create_action('Adjust figure',
slot=self.adjust_fig_action_clicked, checkable=True,
tip='Automatically adjust figure to avoid overlaps and to minimize the empty borders.')
self.show_uncor_obs_action = self.create_action('Show uncorrected spectrum',
slot=self.show_uncor_obs_action_clicked, checkable=True,
tip='Show observational spectrum without the wavelength refining.')
self.add_actions(self.settings_menu,
(None, self.enable_tooltips_action, self.adjust_fig_action, None, self.editing_lines_action, self.update_lines_action, self.show_uncor_obs_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About pySSN')
self.add_actions(self.help_menu, (about_action,))
def fileQuit(self):
self.close()
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QtGui.QAction(text, self)
if icon is not None:
action.setIcon(QtGui.QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, QtCore.SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def isInteger(self, str_):
try:
int(str_)
return True
except ValueError:
return False
def isPositiveInteger(self, str_):
if self.isInteger(str_):
n = int(str_)
if n > 0:
return True
else:
return False
else:
return False
def isPositiveOdd(self, str_):
if self.isInteger(str_):
n = int(str_)
if n%2 == 1 and n > 0:
return True
else:
return False
else:
return False
def isFloat(self, str_):
try:
np.float(str_)
return True
except ValueError:
return False
def floatFixFormat(self, r, fix_fmt, align='>'):
"""
Format r into the fixed-width field described by fix_fmt, switching to a
compact mantissa/exponent notation when the value does not fit at the
requested precision; returns None if fix_fmt is not a float ('f') format
or r is not a number.
floatFixFormat(1.23456789, '{:7.3f}') = '  1.235'
floatFixFormat(-1.23456789, '{:7.3f}') = ' -1.235'
floatFixFormat(123.456789, '{:7.3f}') = ' 1.23e2'
floatFixFormat(-123.456789, '{:7.3f}') = '-1.23e2'
floatFixFormat(1.23456789e+04, '{:7.3f}') = ' 1.23e4'
floatFixFormat(1.23456789e-04, '{:7.3f}') = ' 1.2e-4'
floatFixFormat(1.23456789e+34, '{:7.3f}') = ' 1.2e34'
floatFixFormat(99.999999, '{:7.3f}') = ' 1.00e2'
"""
if not ( 'f' in fix_fmt and self.isFloat(r) ):
return None
s = fix_fmt.strip('{')
s = s.strip('}')
s = s.strip(':')
s = s.strip('f')
k = s.index('.')
w = int(s[:k])
p = int(s[k+1:])
s0 = '{:{align}{w}.{p}f}'.format(float(abs(r)), w=w-1, p=p, align=align)
s = '{:0.{w}e}'.format(float(abs(r)), w=w)
if r < 0:
sgn = '-'
else:
sgn = ''
k = s.index('e')
mantissa = s[:k]
mantissa = mantissa[:p+2]
e = int(s[k+1:])
if p+e+2>w-3-len(str(e)) and len(s0) < w:
s = s0.strip()
else:
s = '{:0.{p}e}'.format(float(abs(r)), p=min(p,w-4-len(str(e))))
k = s.index('e')
mantissa = s[:k]
exponent = str(int(s[k+1:]))
s = mantissa + 'e' + exponent
s = '{:{align}{w}}'.format(sgn+s, w=w, align=align)
return s
def rightFormat(self, s, field):
if field == 'comment':
output = s.strip()
return output
try:
if field == 'profile':
r = int(s)
else:
r = np.float(s)
fmt = self.sp.field_format[field]
if 'f' in fmt:
s = self.floatFixFormat(r, fmt)
else:
s = fmt.format(r)
if len(s) == self.sp.field_width[field] and not np.isinf(r):
if field == 'vitesse' and (r < 0 or s.strip() == '0.00'):
output = None
else:
output = s
else:
output = None
except:
output = None
return output
def ConvStrToValidTypes(self, str_):
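"""
Convert a string typed in the GUI into a python value, for example
'3' -> 3, '1.5' -> 1.5, 'true' -> True, '1,2,3' -> [1.0, 2.0, 3.0],
'(1,2),(3,4)' -> [(1.0, 2.0), (3.0, 4.0)]; an empty or unparsable
string gives None, and anything else is returned unchanged as a string.
"""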
str_ = str_.replace('Error in ','')
str_ = str_.replace(' ','')
if str_ == '':
result = None
elif str_.isdigit():
result = int(str_)
elif self.isFloat(str_):
result = np.float(str_)
elif str_.capitalize() == 'True':
result = True
elif str_.capitalize() == 'False':
result = False
elif str_.find('(') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
str_ = str_.strip('[]()')
result = [(float(s.split(',')[0]),float(s.split(',')[1])) for s in str_.split('),(')]
except:
result = None
elif str_.find(',') >= 0:
try:
str_ = str_.replace('[','')
str_ = str_.replace(']','')
result = [float(i) for i in str_.split(',')]
except:
result = None
else:
result = str_
return result
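# Update (or append) a single 'field = value' entry in the given parameter file: any previous definition is commented out and the new assignment written in its place; the file is scanned backwards, so the last definition is the one replaced.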
def save_par_in_file(self, field, value, path, help_=None):
if self.isValidFilename(path):
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
if type(value) is str:
s0 = ' = \''
s1 = '\'\n'
else:
s0 = ' = '
s1 = '\n'
line = '# ' + line + field + s0 + str(value) + s1
lines[j] = line
found = True
break
j += 1
if not found:
if help_ is not None:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + str(value) + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
def save_cont_pars(self):
file_choices = "Python files (*.py) (*.py);;Text files (*.txt *.dat) (*.txt *.dat);;All Files (*) (*)"
filename = self.sp.config_file.split('/')[-1]
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save to file', filename, file_choices))
if path:
if os.path.isfile(path):
f = open(path, 'r')
lines = f.readlines()[::-1]
f.close()
else:
lines = []
for i in range(0, self.table.rowCount()):
field = str(self.table.item(i,0).text())
value = str(self.table.item(i,1).text())
help_ = str(self.table.item(i,2).text().toUtf8())
help_ = help_.replace('\xce\xb2', 'beta')
help_ = help_.replace('\xe2\x81\xbb\xc2\xb3', '-3')
help_ = help_.replace('\xce\xb1', 'alpha')
help_ = help_.replace('\xce\xbb/5000 \xe2\x84\xab', 'lambda/5000 A')
j = 0
found = False
while ( j < len(lines) ) and ( not found ):
line = str(lines[j])
if line.find(field) == 0:
k = line.find('#')
if k > 0:
comment = ' ' + line[k:]
else:
comment = '\n'
line = field + ' = ' + value + comment
lines[j] = line
found = True
break
j += 1
if not found:
lines.insert(0, '\n# ' + help_ + '\n')
lines.insert(0, field + ' = ' + value + '\n')
lines = lines[::-1]
f = open(path, 'w')
f.writelines(lines)
f.close()
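# Return the sorted velocity shifts of the Gaussian components (those with intensity above 0.2) of the requested emission profile, together with the overall profile velocity; fall back to profile '1' if the key is unknown.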
def get_shifts_from_profile(self, profile_key):
if profile_key not in self.sp.emis_profiles:
profile_key = '1'
vel = self.sp.emis_profiles[profile_key]['vel']
par_list = self.sp.emis_profiles[profile_key]['params']
shift_list = []
for item in par_list:
shift = np.float(item[2])
intensity = np.float(item[1])
if item[0]=='G' and ( intensity > 0.2 ):
shift_list.append(shift)
shift_list.sort()
return shift_list, vel
def plot_tick_at(self, wavelength, ion, line_num):
if self.green_tick_shown:
self.on_draw()
color = 'green'
ion = ion.replace('_',' ').strip()
to_select = (self.sp.liste_raies['num'] == np.int(line_num))
vitesse = self.sp.liste_raies[to_select]['vitesse']
profile_key = str(self.sp.liste_raies[to_select]['profile'][0])
shift_list, vel = self.get_shifts_from_profile(profile_key)
line_num = line_num.strip().strip('0')
# label = ion + ' (' + line_num.strip() + ')'
label = ion + ' {:.2f}'.format(wavelength)
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
if len(shift_list) > 0:
if posTick == 0:
ys1 = 2*y1-y2
ys2 = y1
ym = y1
else:
ys1 = y2
ys2 = 2*y2-y1
ym = y2
if k == 0:
yy1 = self.y1_plot_lims[0] + ym*(self.y1_plot_lims[1] - self.y1_plot_lims[0])
else:
yy1 = self.y3_plot_lims[0] + ym*(self.y3_plot_lims[1] - self.y3_plot_lims[0])
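# If the tick falls close to either edge of the plotted range, move the legend to the opposite corner so that it does not hide the tick.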
current_legend_loc = self.sp.legend_loc
f = 0.15
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
if wavelength - self.x_plot_lims[0] < 2*r*f:
current_legend_loc = 1
if self.x_plot_lims[1] - wavelength < 2*r*f:
current_legend_loc = 2
self.fig.axes[k].axvline( wavelength, y1, y2, color = color, linestyle = 'solid', linewidth = 2.5 )
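# Convert the velocity shifts of the profile components into wavelength shifts, including the overall profile velocity offset (the 1e5 factor converts km/s to the velocity units used by CST.CLIGHT).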
wave_shifts = -vitesse*wavelength*shift_list / CST.CLIGHT * 1e5 + wavelength*vel / CST.CLIGHT * 1e5
if len(wave_shifts) > 0:
max_wave_shift = max(abs(wave_shifts))
else:
max_wave_shift = 0
# Ticks for the individual profile components are only drawn when the maximum component shift exceeds the fraction f of the x-axis width (f = 0.001, i.e. 0.1%); smaller shifts would just overlap the main tick.
f = 0.001
if max_wave_shift > f*(self.x_plot_lims[1] - self.x_plot_lims[0]):
x1 = (wavelength - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
for shift in wave_shifts:
self.fig.axes[k].axvline( wavelength+shift, ys1, ys2, color = color, linestyle = '--', linewidth = 2.5 )
x2 = (wavelength + shift - self.x_plot_lims[0])/(self.x_plot_lims[1] - self.x_plot_lims[0])
self.fig.axes[k].axhline( yy1, x1, x2, color = color, linestyle = '-', linewidth = 1.0 )
if self.addGreenTickToLegend:
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
self.fig.axes[k].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.green_tick_shown = True
self.magenta_tick_shown = False
def show_line_info_dialog(self):
def get_window_size_and_position():
if self.line_info_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.line_info_dialog_width = width
self.line_info_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.line_info_dialog_x = sG.width()-self.line_info_dialog_width
self.line_info_dialog_y = 0
else:
self.line_info_dialog_width = self.line_info_dialog.width()
self.line_info_dialog_height = self.line_info_dialog.height()
self.line_info_dialog_x = self.line_info_dialog.pos().x()
self.line_info_dialog_y = self.line_info_dialog.pos().y()
def save_initial_plot_pars():
self.init_line_num = self.line_info_box.text()
self.init_ion = self.ion_box.text()
self.init_xmin = self.xlim_min_box.text()
self.init_xmax = self.xlim_max_box.text()
self.init_y1min = self.y1lim_min_box.text()
self.init_y1max = self.y1lim_max_box.text()
self.init_y3min = self.y3lim_min_box.text()
self.init_y3max = self.y3lim_max_box.text()
self.init_legend_fontsize = self.sp.legend_fontsize
self.init_legend_loc = self.sp.legend_loc
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def redo_initial_plot():
self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_ion)
self.xlim_min_box.setText(self.init_xmin)
self.xlim_max_box.setText(self.init_xmax)
self.y1lim_min_box.setText(self.init_y1min)
self.y1lim_max_box.setText(self.init_y1max)
self.y3lim_min_box.setText(self.init_y3min)
self.y3lim_max_box.setText(self.init_y3max)
self.sp.legend_fontsize = self.init_legend_fontsize
self.sp.legend_loc = self.init_legend_loc
self.set_plot_limits_and_draw()
#self.save_from_lim_boxes()
#self.draw_ion()
def do_reset():
self.curr_line_num = self.init_line_num
get_info(self.curr_line_num)
fill_line_info_table()
redo_initial_plot()
def toggle_show_satellites():
self.show_satellites = (self.show_satellites + 1)%3
fill_line_info_table()
def on_click():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_doubleClick():
item = self.line_info_table.currentItem()
row = item.row()
col = item.column()
s = item.text()
if col == col_ion:
ion = self.line_info_table.item(row, col).text()
self.ion_box.setText(ion)
self.draw_ion()
if not self.isFloat(s):
return
if col in [col_num, col_ref] and int(s) != 0:
self.curr_line_num = s
get_info(self.curr_line_num)
self.line_info_box.setText(self.curr_line_num)
fill_line_info_table()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.line_info_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
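# Selecting a wavelength cell recenters the plot on that line (staying inside the synthesis range) and draws a green tick at the corrected wavelength; for reference records (stored with lambda = 1) the ticks of all their satellites are plotted instead.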
def on_itemSelectionChanged():
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
item = self.line_info_table.currentItem()
if item is None:
self.draw_ion()
return
self.selected_item = item
row = item.row()
col = item.column()
s = item.text()
l_shift_refline = np.float(self.sp.fieldStrFromLine(self.refline,'l_shift'))
if col == col_wave:
wavelength = np.float(s)
ion = str(self.line_info_table.item(row, col_ion).text())
line_num = str(self.line_info_table.item(row, col_num).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
if wavelength > min_wave and wavelength < max_wave:
l_shift = np.float(self.line_info_table.item(row, col_lshift).text())
wavelength = wavelength + l_shift + l_shift_refline
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
elif wavelength == 1:
if str(self.line_info_table.item(row, col_ref).text()) == '0000000000000':
satellites = self.satellites
else:
satellites = self.sp.read_satellites(self.sp.phyat_file, int(line_num))
satellites = add_satellites_of_subreferences(satellites)
SelectedSatellites = []
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if (wavelength > min_wave) and (wavelength < max_wave):
SelectedSatellites.append(satellites[i])
satellites = SelectedSatellites
self.plot_line_ticks_for(satellites, ion, line_num, self.refline)
def isRefLine(line):
s = self.sp.fieldStrFromLine(line,'ref').strip()
if s == '0000000000000':
return True
else:
return False
def isSubRefLine(line):
wavelength = np.float(self.sp.fieldStrFromLine(line,'lambda'))
if not isRefLine(line) and (wavelength < 2.0):
return True
else:
return False
def fill_data(i, line, cat=''):
if line is None:
return
editableCols = []
if self.sp.get_conf('qt_allow_editing_lines', False):
if cat == 'sat':
if do_cosmetics:
editableCols = ['l_shift', 'i_cor', 'profile', 'vitesse', 'comment']
else:
editableCols = []
elif cat == 'subref':
if do_cosmetics:
editableCols = ['i_cor', 'comment']
else:
editableCols = []
elif cat == 'ref':
editableCols = ['l_shift', 'i_cor', 'i_rel', 'profile', 'vitesse', 'comment']
for j in range(0,len(fieldItems)):
s = self.sp.fieldStrFromLine(line, fieldItems[j])
s = s.strip()
if j == col_ion:
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
isPseudoIon = self.sp.isPseudoIon(s)
if j == fieldItems.index('proc'):
if isRefLine(line):
s = ''
elif isPseudoIon:
s = ''
else:
s = self.sp.process[s]
item = QtGui.QTableWidgetItem(s)
if fieldItems[j] in editableCols:
item.setBackgroundColor(self.editableCells_bg_color)
else:
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.line_info_table.setItem(i,j,item)
def fill_text(i, text):
item = QtGui.QTableWidgetItem(text)
item.setFlags(item.flags() ^ (QtCore.Qt.ItemIsEditable|QtCore.Qt.ItemIsSelectable|QtCore.Qt.ItemIsEnabled))
item.setBackgroundColor(self.readOnlyCells_bg_color)
item.setTextAlignment(QtCore.Qt.AlignBottom)
item.setTextColor(QtCore.Qt.blue)
self.line_info_table.setItem(i,0,item)
self.line_info_table.setSpan(i,0,2,len(fieldItems))
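# Recursively collect the satellites of every subreference line so that the whole dependency tree of the reference line appears in the table.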
def add_satellites_of_subreferences(satellites):
subref_list = []
all_satellites = satellites
for sat_line in satellites:
if isSubRefLine(sat_line):
subref_list.append(sat_line)
i = 0
while i < len(subref_list):
sat_line_num = self.sp.fieldStrFromLine(subref_list[i],'num')
new_satellites = self.sp.read_satellites(self.sp.phyat_file, int(sat_line_num))
for line in new_satellites:
if isSubRefLine(line):
subref_list.append(line)
i += 1
for line in new_satellites:
if not line in all_satellites:
all_satellites.append(line)
return all_satellites
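# Walk up the reference chain of line_num (trying the model file, then the cosmetic file, then the atomic data file) and collect the line itself, its subreference lines, the reference line, and all their satellites.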
def get_info(line_num):
line = None
refline = None
subrefline = None
LineList = []
if int(line_num) == 0:
return
while refline == None:
refline = self.sp.read_line(self.sp.fic_model, int(line_num))
if refline is None:
if do_cosmetics:
curr_line = self.sp.read_line(self.sp.fic_cosmetik, int(line_num))
else:
curr_line = None
if self.sp.cosmetic_line_ok(curr_line) is not True:
curr_line = None
if curr_line is None:
curr_line = self.sp.read_line(self.sp.phyat_file, int(line_num))
LineList.append(curr_line)
line_num = self.sp.fieldStrFromLine(curr_line,'ref')
if len(LineList) > 0:
if isSubRefLine(LineList[0]):
subrefline = LineList[:1]
else:
line = LineList[0]
if len(LineList) > 1:
subrefline = LineList[1:]
if subrefline is not None:
n_subref = len(subrefline)
else:
n_subref = 0
subsatellites = []
for k in range(0, n_subref):
subsat = []
subrefline_num = self.sp.fieldStrFromLine(subrefline[k], 'num')
subsat = self.sp.read_satellites(self.sp.phyat_file, int(subrefline_num))
n_subsat = len(subsat)
if do_cosmetics:
for i in range(0,n_subsat):
sat_line = subsat[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
subsat[i] = cosmetic_line
subsatellites = subsatellites + subsat
subsatellites = add_satellites_of_subreferences(subsatellites)
n_subsat = len(subsatellites)
if refline is not None:
refline_num = self.sp.fieldStrFromLine(refline,'num')
satellites = self.sp.read_satellites(self.sp.phyat_file, int(refline_num))
satellites = add_satellites_of_subreferences(satellites)
n_sat = len(satellites)
if do_cosmetics:
for i in range(0,n_sat):
sat_line = satellites[i]
sat_line_num = int(self.sp.fieldStrFromLine(sat_line,'num'))
cosmetic_line = self.sp.read_line(self.sp.fic_cosmetik, sat_line_num)
if cosmetic_line is not None:
satellites[i] = cosmetic_line
else:
n_sat = 0
if line is None and refline is None:
title = 'Error in line info dialog'
msg = 'Line number not found.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
self.line = line
self.subrefline = subrefline
self.refline = refline
self.subsatellites = subsatellites
self.satellites = satellites
self.n_sat = n_sat
self.n_subsat = n_subsat
self.n_subref = n_subref
def do_sort(lines):
waves = []
for i in range(0,len(lines)):
waves.append(self.sp.fieldStrFromLine(lines[i], 'lambda'))
lines = [x for _,x in sorted(zip(waves,lines))]
return lines
def fill_line_info_table():
self.line_info_table.blockSignals(True)
line = self.line
subrefline = self.subrefline
refline = self.refline
subsatellites = self.subsatellites
satellites = self.satellites
n_sat = self.n_sat
n_subsat = self.n_subsat
n_subref = self.n_subref
SelectedSatellites = []
SelectedSubSatellites = []
if self.show_satellites == 0:
n_sat = 0
n_subsat = 0
else:
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
for i in range(0, len(satellites)):
wavelength = np.float(self.sp.fieldStrFromLine(satellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSatellites.append(satellites[i])
for i in range(0, len(subsatellites)):
wavelength = np.float(self.sp.fieldStrFromLine(subsatellites[i],'lambda'))
if self.show_satellites == 2 or \
(self.show_satellites == 1 and (wavelength > min_wave) and (wavelength < max_wave)):
SelectedSubSatellites.append(subsatellites[i])
n_sat = len(SelectedSatellites)
n_subsat = len(SelectedSubSatellites)
self.line_info_table.clearContents()
self.line_info_table.setRowCount(n_sat+n_subsat+20)
self.line_info_table.clearSpans()
k = 0
sat_list = []
if line is not None:
fill_text(k,'Line:')
k += 2
fill_data(k, line, 'sat')
k += 1
if subrefline is not None:
fill_text(k,'Subreference line:')
k += 2
for i in range(0,n_subref):
fill_data(k, subrefline[i], 'subref')
k += 1
if n_subsat > 0:
SelectedSubSatellites = do_sort(SelectedSubSatellites)
fill_text(k, str(n_subsat) + ' satellites:')
sat_list.append([k,n_subsat])
k += 2
for i in range(0,n_subsat):
if isSubRefLine(SelectedSubSatellites[i]):
fill_data(k+i, SelectedSubSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSubSatellites[i], 'sat')
k += n_subsat
fill_text(k,'Reference line:')
k += 2
fill_data(k, refline, 'ref')
k += 1
if n_sat > 0:
SelectedSatellites = do_sort(SelectedSatellites)
fill_text(k, str(n_sat) + ' satellites:')
sat_list.append([k,n_sat])
k += 2
for i in range(0,n_sat):
if isSubRefLine(SelectedSatellites[i]):
fill_data(k+i, SelectedSatellites[i], 'subref')
else:
fill_data(k+i, SelectedSatellites[i], 'sat')
k += n_sat
self.line_info_table.setRowCount(k)
self.line_info_table.resizeColumnsToContents()
self.line_info_table.resizeRowsToContents()
self.line_info_table.blockSignals(False)
self.line_info_table.blockSignals(True)
if self.show_satellites == 1:
s0 = ' (in the synthesis range)'
elif self.show_satellites == 2:
s0 = ' (in the entire database and including subreferences)'
else:
s0 = ''
for i in sat_list:
k = i[0]
n = i[1]
fill_text(k, str(n) + ' satellites:' + s0)
self.line_info_table.blockSignals(False)
def on_itemChanged():
self.line_info_table.blockSignals(True)
item = self.line_info_table.currentItem()
if not (item.flags() & QtCore.Qt.ItemIsEditable):
self.line_info_table.blockSignals(False)
return
row = item.row()
col = item.column()
s = str(item.text())
value = self.rightFormat(s, fieldItems[col])
if value is not None:
self.line_info_table.setItem(row, col, QtGui.QTableWidgetItem(value.strip()))
self.line_info_table.item(row, col).setBackgroundColor(self.editableCells_bg_color)
save_change(row,col)
else:
self.line_info_table.item(row, col).setBackgroundColor(QtGui.QColor('red'))
title = 'Invalid format for the ' + self.sp.field_tip[fieldItems[col]]
s0 = self.sp.field_format[fieldItems[col]]
s0 = s0[2:-1]
msg = "'" + s + "' cannot be converted into the proper field format: " + s0
if col == self.sp.fields.index('vitesse'):
msg = msg + '\nor it is not a positive number.'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
get_info(self.curr_line_num)
fill_line_info_table()
self.line_info_table.blockSignals(False)
def get_line_from_table(row):
line = ' '*85
jList = range(0,len(fieldItems))
jList.remove(col_proc)
for j in jList:
s = self.line_info_table.item(row,j).text()
width = self.sp.field_width[fieldItems[j]]
align = self.sp.field_align[fieldItems[j]]
pos = self.sp.field_pos[fieldItems[j]]
s = '{:{a}{w}s}'.format(s, a=align, w=width)
line = line[:pos] + s + line[pos:]
line = line.rstrip()
return line
def save_change(row, col):
line = get_line_from_table(row)
if isRefLine(line):
filename = self.sp.fic_model
else:
filename = self.sp.fic_cosmetik
self.sp.replace_line(filename, line)
if col != self.sp.fields.index('comment') and \
self.sp.get_conf('qt_update_after_editing_lines', False):
self.adjust()
self.nearbyLines = self.sp.get_nearby_lines(self.cursor_w1, self.cursor_w2, do_print=False)
if self.nearbyLines is not None and self.nearbyLines_dialog.isVisible():
self.fill_nearbyLines_table()
def init_lines():
self.line = None
self.subrefline = None
self.refline = None
self.subsatellites = []
self.satellites = []
self.n_sat = 0
self.n_subsat = 0
self.n_subref = 0
statusBar = QtGui.QStatusBar()
s = 'Click on \"Satellites\" to cycle the tri-state display of satellite lines:\n' \
' 1 - The satellite lines in the spectral range of the synthesis are shown; \n' \
'  2 - All satellite lines (including subreference lines and lines outside the spectral range of the synthesis) are shown; \n' \
'  3 - No satellite line is shown. \n' \
'Double-click on a line number to show the data for that line. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Select or click on a wavelength to draw a tick at that position and recenter the spectrum if necessary. \n' \
'Click on \"Reset\" to return to the original line and plot settings. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.show_satellites = 1
get_window_size_and_position()
if self.line_info_dialog is not None:
self.line_info_dialog.close()
self.line_info_table.close()
self.line_info_dialog = QtGui.QDialog()
self.line_info_dialog.resize(self.line_info_dialog_width,self.line_info_dialog_height)
self.line_info_dialog.move(self.line_info_dialog_x,self.line_info_dialog_y)
self.line_info_table = QtGui.QTableWidget()
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.line_info_table.setColumnCount(len(fieldItems))
self.line_info_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.line_info_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.line_info_table.horizontalHeaderItem(col_vel).setText(u'\u0394v (factor)')
if self.enable_tooltips_action.isChecked():
s = 'For a reference line, it is the thermal broadening parameter, in km/s. \n' \
'For a satellite line, it is the dimensionless correction factor for the thermal broadening parameter with respect to the reference line.'
self.line_info_table.horizontalHeaderItem(col_vel).setToolTip(s)
self.line_info_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.line_info_table.horizontalHeaderItem(col_comm).setText(' comment')
init_lines()
do_cosmetics = self.sp.get_conf('do_cosmetik')
save_initial_plot_pars()
self.curr_line_num = self.line_info_box.text()
get_info(self.curr_line_num)
fill_line_info_table()
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.Apply)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Satellites")
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip("Click to toggle the satellite lines")
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(toggle_show_satellites)
s = "Click to return to the initial states of the line info dialog and figures"
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).setToolTip(s)
self.buttonBox.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox.rejected.connect(self.line_info_dialog.close)
self.line_info_table.doubleClicked.connect(on_doubleClick)
self.line_info_table.itemChanged.connect(on_itemChanged)
self.selected_item = None
self.line_info_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.line_info_table.itemClicked.connect(on_itemClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.line_info_table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.line_info_dialog.setLayout(vbox)
self.line_info_dialog.setWindowTitle('line info dialog')
self.line_info_dialog.setWindowModality(QtCore.Qt.NonModal)
self.line_info_dialog.show()
def fill_nearbyLines_table(self):
if self.nearbyLines is None or self.nearbyLines_table is None:
return
k = self.sp.get_conf('diff_lines_by')
fieldItems = self.sp.fields
jList = range(0,len(fieldItems))
jProc = fieldItems.index('proc')
jList.remove(jProc)
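# When the ion filter is active, keep only the lines whose ion (or parent ion, depending on the 'diff_lines_by' setting) belongs to the selected ions.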
if self.nearbyDialogFilterIsActive:
#selected_ions = self.sp.get_conf('selected_ions')
selected_ions = self.nearbyLines_selected_ions
selected_true_ions = [self.sp.true_ion(ion) for ion in selected_ions]
nearbyLines = []
for line in self.nearbyLines:
ion = str(line[fieldItems.index('id')]).strip()
true_ion = self.sp.true_ion(ion)
selectThisIon = (( ion in selected_ions or true_ion in selected_ions ) and k == 1) or (true_ion in selected_true_ions and k != 1)
if selectThisIon:
nearbyLines.append(line)
else:
nearbyLines = self.nearbyLines
self.nearbyLines_table.setRowCount(len(nearbyLines))
for i in range(0,len(nearbyLines)):
ion = self.sp.true_ion(nearbyLines[i][fieldItems.index('id')])
for j in jList:
if j > jProc:
k = j - 1
else:
k = j
fmt = self.sp.field_format[fieldItems[j]]
s = fmt.format(nearbyLines[i][k])
s = str(s).strip()
if j == fieldItems.index('num'):
if self.sp.isPseudoIon(ion):
proc_str = ''
else:
proc_str = self.sp.process[s[-9]]
if j == fieldItems.index('id'):
if self.show_true_ions:
s = self.sp.true_ion(s).replace('_',' ').strip()
item = QtGui.QTableWidgetItem(s)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,j,item)
item = QtGui.QTableWidgetItem(proc_str)
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.nearbyLines_table.setItem(i,jProc,item)
self.nearbyLines_table.resizeColumnsToContents()
self.nearbyLines_table.resizeRowsToContents()
self.nearbyLines_table.clearSelection()
def show_nearbyLines_dialog(self):
def get_window_size_and_position():
if self.nearbyLines_dialog is None:
font = QtGui.QFont()
width = QtGui.QFontMetrics(font).width('='*120)
self.nearbyLines_dialog_width = width
self.nearbyLines_dialog_height = 470
sG = QtGui.QApplication.desktop().screenGeometry()
self.nearbyLines_dialog_x = sG.width()-self.nearbyLines_dialog_width
self.nearbyLines_dialog_y = sG.height()-self.nearbyLines_dialog_height
else:
self.nearbyLines_dialog_width = self.nearbyLines_dialog.width()
self.nearbyLines_dialog_height = self.nearbyLines_dialog.height()
self.nearbyLines_dialog_x = self.nearbyLines_dialog.pos().x()
self.nearbyLines_dialog_y = self.nearbyLines_dialog.pos().y()
def do_reset():
self.curr_line_num = self.init_nearby_line_num
#get_info(self.curr_line_num)
#fill_line_info_table()
self.nearbyDialogFilterIsActive = True
#self.nearbyLines_selected_ions = []
toggle_filter()
redo_initial_plot()
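# Toggle the ion filter of the nearby-lines table: activating it keeps only the currently selected ions and highlights the filter button; deactivating it restores the full list.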
def toggle_filter():
self.nearbyLines_selected_ions = []
if not self.nearbyDialogFilterIsActive:
get_selected_ions()
if len(self.nearbyLines_selected_ions) > 0:
self.nearbyDialogFilterIsActive = True
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Deactivate ion filter')
else:
QtGui.QMessageBox.critical(self, 'nearby lines dialog: ion filter', 'No ion selected.', QtGui.QMessageBox.Ok )
else:
self.nearbyDialogFilterIsActive = False
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.fill_nearbyLines_table()
def save_initial_plot_pars():
self.init_nearby_line_num = self.line_info_box.text()
self.init_nearby_ion = self.ion_box.text()
self.init_nearby_xmin = self.xlim_min_box.text()
self.init_nearby_xmax = self.xlim_max_box.text()
self.init_nearby_y1min = self.y1lim_min_box.text()
self.init_nearby_y1max = self.y1lim_max_box.text()
self.init_nearby_y3min = self.y3lim_min_box.text()
self.init_nearby_y3max = self.y3lim_max_box.text()
self.init_nearby_legend_fontsize = self.sp.legend_fontsize
self.init_nearby_legend_loc = self.sp.legend_loc
def redo_initial_plot():
#self.line_info_box.setText(self.init_line_num)
self.ion_box.setText(self.init_nearby_ion)
self.xlim_min_box.setText(self.init_nearby_xmin)
self.xlim_max_box.setText(self.init_nearby_xmax)
self.y1lim_min_box.setText(self.init_nearby_y1min)
self.y1lim_max_box.setText(self.init_nearby_y1max)
self.y3lim_min_box.setText(self.init_nearby_y3min)
self.y3lim_max_box.setText(self.init_nearby_y3max)
self.sp.legend_fontsize = self.init_nearby_legend_fontsize
self.sp.legend_loc = self.init_nearby_legend_loc
self.set_plot_limits_and_draw()
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def on_doubleClick():
item = self.nearbyLines_table.currentItem()
row = item.row()
col = item.column()
if col in [col_num, col_ref]:
self.line_info_box.setText(item.text())
self.show_line_info_dialog()
elif col == col_ion:
self.ion_box.setText(item.text())
self.draw_ion()
def on_itemClicked():
# to avoid blinking with itemSelectionChanged
item = self.nearbyLines_table.currentItem()
if item == self.selected_item:
on_itemSelectionChanged()
def on_itemSelectionChanged():
item = self.nearbyLines_table.currentItem()
self.selected_item = item
row = item.row()
col = item.column()
if col == col_wave:
wavelength = np.float(item.text())
l_shift = np.float(self.nearbyLines_table.item(row,col_lshift).text())
wavelength = wavelength + l_shift
line_num = str(self.nearbyLines_table.item(row,col_num).text())
ion = str(self.nearbyLines_table.item(row,col_ion).text())
max_wave = np.float(self.sp_max_box.text())
min_wave = np.float(self.sp_min_box.text())
r = (self.x_plot_lims[1] - self.x_plot_lims[0])/2
f = 0.05
if (wavelength < self.x_plot_lims[0] + f*r) or (wavelength > self.x_plot_lims[1] - f*r):
if wavelength-r < min_wave:
self.x_plot_lims = (min_wave-r*f, min_wave-r*f+2*r)
elif wavelength+r > max_wave:
self.x_plot_lims = (max_wave+r*f-2*r , max_wave+r*f)
else:
self.x_plot_lims = (wavelength-r,wavelength+r)
if not self.axes_fixed:
self.update_lim_boxes()
self.restore_axes()
self.plot_tick_at(wavelength, ion, line_num)
else:
if self.green_tick_shown:
self.on_draw()
self.green_tick_shown = False
def do_header_clicked(col):
if col == col_ion:
self.toggle_show_true_ions()
self.fill_nearbyLines_table()
def do_header_doubleClicked(col):
sort = fieldItems[col]
if sort == self.nearbyLines_sort_by:
self.nearbyLines_sort_reverse = not self.nearbyLines_sort_reverse
else:
self.nearbyLines_sort_reverse = False
self.nearbyLines_sort_by = sort
self.sort_nearbyLines(sort, self.nearbyLines_sort_reverse)
self.fill_nearbyLines_table()
def get_selected_ions():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if len(selected_ions) > 0:
self.nearbyLines_selected_ions = selected_ions
else:
#self.nearbyLines_selected_ions = self.sp.get_conf('selected_ions')
self.nearbyLines_selected_ions = []
def do_selection():
selectedItems = self.nearbyLines_table.selectedItems()
selected_ions = []
selected_lines = []
for item in selectedItems:
col = item.column()
if col == col_ion:
ion = str(item.text())
if not ion in selected_ions:
selected_ions.append(ion)
if col in [col_num, col_ref]:
line = item.text()
selected_lines.append(line)
if len(selected_ions) > 0:
s = ''
for ion in selected_ions:
s = s + ion + ', '
s = s[:-2]
self.ion_box.setText(s)
self.draw_ion()
if len(selected_lines) > 0:
s = selected_lines[0]
self.line_info_box.setText(s)
self.line_info()
get_window_size_and_position()
self.nearbyLines_dialog = QtGui.QDialog()
self.nearbyLines_dialog.resize(self.nearbyLines_dialog_width, self.nearbyLines_dialog_height)
self.nearbyLines_dialog.move(self.nearbyLines_dialog_x,self.nearbyLines_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Double-click on a line number (or select the line number and press \"Apply\") to show line info dialog. \n' \
'Double-click on an ion to plot line ticks and spectrum for that single ion. \n' \
'Click or select a wavelength to draw a tick at that position. \n' \
'Select multiple ions (using click, Shift+click, and Ctrl+click) and press \"Plot selected ions\" to plot line ticks and spectra for a list of ions. \n' \
'Click on the ion header to select all ions. \n' \
'Double-click on a column header to sort the table; Double-click again to toggle between ascending and descending order. \n' \
'Click on \"Reset\" to return to the original selected ions and plot settings. \n' \
'Click on \"Filter selected ions\" to activate/deactivate ion selection.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.nearbyLines_table = QtGui.QTableWidget()
self.nearbyLines_table.setRowCount(len(self.nearbyLines))
fieldItems = self.sp.fields
fieldNames = [ self.sp.field_abbr[item] for item in fieldItems ]
col_num = fieldItems.index('num')
col_ion = fieldItems.index('id')
col_wave = fieldItems.index('lambda')
col_proc = fieldItems.index('proc')
col_lshift = fieldItems.index('l_shift')
col_irel = fieldItems.index('i_rel')
col_icor = fieldItems.index('i_cor')
col_ref = fieldItems.index('ref')
col_prof = fieldItems.index('profile')
col_vel = fieldItems.index('vitesse')
col_comm = fieldItems.index('comment')
self.nearbyLines_table.setColumnCount(len(fieldNames))
self.nearbyLines_table.setHorizontalHeaderLabels(fieldNames)
if self.enable_tooltips_action.isChecked():
for j in range(0,len(fieldItems)):
self.nearbyLines_table.horizontalHeaderItem(j).setToolTip(self.sp.field_tip[fieldItems[j]])
self.nearbyLines_table.horizontalHeaderItem(col_comm).setTextAlignment(QtCore.Qt.AlignLeft)
self.nearbyLines_table.horizontalHeaderItem(col_vel).setText(u'\u0394v')
if self.enable_tooltips_action.isChecked():
s = u'\u0394v is the thermal broadening parameter of the line, in km/s. \n' \
'For a single Gaussian profile, it is the half-width of the line at the level of 1/e of the peak, \n' \
'related to the full-width at half maximum and the Gaussian standard deviation by:\n\n' \
u' \u0394v = FWHM/(2(ln2)^\u00BD) = FWHM/1.665\n' \
u' \u0394v = \u221A2 \u03C3\n'
self.nearbyLines_table.horizontalHeaderItem(col_vel).setToolTip(s)
self.nearbyLines_table.horizontalHeaderItem(col_comm).setText(' comment')
#self.nearbyDialogFilterIsActive = False
self.fill_nearbyLines_table()
save_initial_plot_pars()
self.buttonBox_nearbyLines = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Reset|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setText('Filter selected ions')
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).setText('Plot selected ions')
self.buttonBox_nearbyLines.rejected.connect(self.nearbyLines_dialog.close)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_selection)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.Reset).clicked.connect(do_reset)
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_filter)
self.nearbyLines_table.doubleClicked.connect(on_doubleClick)
self.nearbyLines_table.itemSelectionChanged.connect(on_itemSelectionChanged)
self.nearbyLines_table.itemClicked.connect(on_itemClicked)
self.nearbyLines_table.verticalHeader().sectionDoubleClicked.connect(do_selection)
#self.nearbyLines_table.horizontalHeader().sectionClicked.connect(do_header_clicked)
self.nearbyLines_table.horizontalHeader().sectionDoubleClicked.connect(do_header_doubleClicked)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.nearbyLines_table)
vbox.addWidget(self.buttonBox_nearbyLines)
vbox.addWidget(statusBar)
self.nearbyLines_dialog.setLayout(vbox)
s = 'nearby line dialog: list of lines between {0:.2f} and {1:.2f} angstroms'.format(self.sp.cursor_w1, self.sp.cursor_w2)
self.nearbyLines_dialog.setWindowTitle(s)
self.nearbyLines_dialog.setWindowModality(QtCore.Qt.NonModal)
self.cursor_w1 = self.sp.cursor_w1
self.cursor_w2 = self.sp.cursor_w2
if self.nearbyDialogFilterIsActive:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('background-color:red;')
else:
self.buttonBox_nearbyLines.button(QtGui.QDialogButtonBox.RestoreDefaults).setStyleSheet('')
self.nearbyLines_dialog.show()
def cont_dialog(self):
Pars = [ ( 'cont_unred' , 'Set to True if reddening is to be applied to the continuum' ),
( 'cont_edens' , u'Electron density, in cm\u207B\u00B3' ),
( 'cont_hi_t' , 'Temperature for the H I continuum, in K' ),
( 'cont_hi_i' , u'Intensity of the H I continuum (in theory, intensity of H\u03B2)' ),
( 'cont_hei_t' , 'Temperature for the He I continuum, in K' ),
( 'cont_hei_i' , 'Intensity of the He I continuum (in theory, intensity of He I 4471)' ),
( 'cont_heii_t' , 'Temperature for the He II continuum, in K' ),
( 'cont_heii_i' , 'Intensity of the He II continuum (in theory, intensity of He I 4686)' ),
( 'cont_bb_t' , 'Temperature of the blackbody continuum, in K' ),
( 'cont_bb_i' , 'Intensity of the blackbody continuum' ),
( 'cont_pl_alpha' , u'Index \u03B1 of the power-law continuum F = I*(\u03BB/5000 \u212B)**\u03B1' ),
( 'cont_pl_i' , 'Intensity I of the power-law continuum' ),
( 'cont_user_table' , 'Interpolation table for the user-defined continuum' ),
( 'cont_user_func' , 'Interpolation function for the user-defined continuum' ) ]
def toggle_statusbar():
self.showStatusBar = not self.showStatusBar
statusBar.setVisible(self.showStatusBar)
def get_window_size_and_position():
if self.cont_pars_dialog is None:
self.cont_pars_dialog_width = 800
self.cont_pars_dialog_height = 460
sG = QtGui.QApplication.desktop().screenGeometry()
self.cont_pars_dialog_x = sG.width()-self.cont_pars_dialog_width
self.cont_pars_dialog_y = sG.height()-self.cont_pars_dialog_height
self.cont_pars_dialog_x = 0
self.cont_pars_dialog_y = 0
else:
self.cont_pars_dialog_width = self.cont_pars_dialog.width()
self.cont_pars_dialog_height = self.cont_pars_dialog.height()
self.cont_pars_dialog_x = self.cont_pars_dialog.pos().x()
self.cont_pars_dialog_y = self.cont_pars_dialog.pos().y()
def set_conf_from_table(row):
s = str(self.table.item(row,1).text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
def on_itemChanged():
self.table.blockSignals(True)
item = self.table.currentItem()
row = item.row()
s = str(item.text())
value = self.ConvStrToValidTypes(s)
if value != None:
self.sp.set_conf(Pars[row][0], value)
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value)))
self.table.item(row, 1).setBackgroundColor(self.editableCells_bg_color)
self.cont_par_changed = True
else:
self.table.setItem(row, 1, QtGui.QTableWidgetItem('Error in ' + s))
self.table.item(row, 1).setBackgroundColor(QtGui.QColor('red'))
self.table.blockSignals(False)
get_window_size_and_position()
self.cont_pars_dialog = QtGui.QDialog()
self.cont_pars_dialog.resize(self.cont_pars_dialog_width, self.cont_pars_dialog_height)
self.cont_pars_dialog.move(self.cont_pars_dialog_x, self.cont_pars_dialog_y)
statusBar = QtGui.QStatusBar()
s = 'Click on \"Save\" to write the continuum parameters to a file. \n' \
'Click on \"Update\" to adjust the synthesis to the changes in the continuum parameters. \n' \
'The green fields are editable.'
statusBar.addWidget(QtGui.QLabel(s),1)
self.showStatusBar = False
statusBar.setVisible(self.showStatusBar)
self.table = QtGui.QTableWidget()
self.table.setRowCount(len(Pars))
self.table.setColumnCount(3)
self.table.setHorizontalHeaderLabels([ 'parameter', 'value', 'help' ])
for j in range(0,len(Pars)):
item = QtGui.QTableWidgetItem(Pars[j][0])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,0,item)
value = self.sp.get_conf(Pars[j][0])
#if isinstance(value, basestring):
# value = '\'{}\''.format(value)
item = QtGui.QTableWidgetItem(str(value))
#item = QtGui.QTableWidgetItem(str(self.sp.get_conf(Pars[j][0])))
item.setBackgroundColor(self.editableCells_bg_color)
self.table.setItem(j,1,item)
item = QtGui.QTableWidgetItem(Pars[j][1])
item.setFlags(item.flags() ^ QtCore.Qt.ItemIsEditable)
item.setBackgroundColor(self.readOnlyCells_bg_color)
self.table.setItem(j,2,item)
self.table.resizeColumnsToContents()
self.table.resizeRowsToContents()
if self.table.columnWidth(1) > 300:
self.table.setColumnWidth(1,300)
self.table.itemChanged.connect(on_itemChanged)
self.buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Save|
QtGui.QDialogButtonBox.Apply|
QtGui.QDialogButtonBox.Close)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).setDefault(True)
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setText('Update')
if self.enable_tooltips_action.isChecked():
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).setToolTip('Click to update synthesis with changes in the continuum parameters.')
self.buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(self.adjust)
self.buttonBox.rejected.connect(self.cont_pars_dialog.close)
self.buttonBox.button(QtGui.QDialogButtonBox.Save).clicked.connect(self.save_cont_pars)
self.buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.table)
vbox.addWidget(self.buttonBox)
vbox.addWidget(statusBar)
self.cont_pars_dialog.setLayout(vbox)
self.cont_pars_dialog.setWindowTitle('Continuum parameters')
self.cont_pars_dialog.show()
def get_line_tick_lim(self, line_tick_pos):
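# Vertical band (in axes-fraction units) used for the line ticks:
# 1 -> middle band (0.43-0.57), 2 -> bottom band (0.05-0.19), any other value -> top band (0.81-0.95).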
if line_tick_pos == 1:
y1 = 0.43
y2 = 0.57
else:
if line_tick_pos == 2:
y1 = 0.05
y2 = 0.19
else:
y1 = 0.81
y2 = 0.95
return y1, y2
def getTickPosOfSelectedLine(self):
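# 'line_tick_pos_selectedLine': 0, 1, 2 pick a band directly (top, middle, bottom via
# get_line_tick_lim); 3 (the default) follows 'line_tick_pos'; 4 uses the side opposite
# to the other line ticks (top if they are at the bottom, bottom otherwise).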
posTick = self.sp.get_conf('line_tick_pos_selectedLine',3)
if posTick not in [0,1,2]:
posOtherTicks = self.sp.get_conf('line_tick_pos')
if posTick == 4:
if posOtherTicks == 2:
posTick = 0
else:
posTick = 2
else:
posTick = posOtherTicks
return posTick
def plot_line_ticks_for(self, satellites, ion, line_num, refline):
k = self.sp.get_conf('line_tick_ax')
if not (k == 1 and self.residual_GroupBox.isChecked()):
k = 0
posTick = self.getTickPosOfSelectedLine()
y1, y2 = self.get_line_tick_lim(posTick)
if len(satellites) > 0:
if ( k == 0 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], self.addGreenTickToLegend)
elif ( k == 1 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes3, y1, y2, self.addGreenTickToLegend)
elif ( k == 2 ):
self.sp.plot_line_ticks_for(satellites, ion, line_num, refline, self.axes2, 0.2, 0.8, self.addGreenTickToLegend)
self.green_tick_shown = True
self.canvas.draw()
def on_draw(self, show_legend=True):
log_.debug('Entering on_draw', calling=self.calling)
if self.sp is None:
log_.debug('No sp in on_draw', calling=self.calling)
return
if self.axes is None:
log_.debug('Calling make_axes from on_draw (self.axes is None)', calling=self.calling)
self.call_on_draw=False
self.make_axes()
self.init_axes()
log_.debug('back from make_axes from on_draw', calling=self.calling)
self.call_on_draw=True
if self.do_save:
self.save_axes()
self.axes.cla()
self.sp.plot_ax1(self.axes, show_legend)
k = self.sp.get_conf('line_tick_ax')
if self.show_line_ticks_action.isChecked() and ( k == 0 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes, y1, y2, self.x_plot_lims[0], self.x_plot_lims[1], show_legend=show_legend)
if self.sp.get_conf('cont_plot', False):
self.sp.plot_conts(self.axes)
if self.residual_GroupBox.isChecked():
self.axes3.cla()
self.sp.plot_ax3(self.axes3, show_legend)
if self.show_line_ticks_action.isChecked() and ( k == 1 ):
y1, y2 = self.get_line_tick_lim(self.sp.get_conf('line_tick_pos'))
self.sp.plot_line_ticks(self.axes3, y1, y2)
if self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.cla()
# self.sp.plot_ax2(self.axes2)
self.sp.plot_line_ticks(self.axes2, 0.2, 0.8)
if self.residual_GroupBox.isChecked():
self.axes3.set_xlabel(r'Wavelength ($\AA$)')
self.axes3.set_ylabel(r'Residual')
#elif self.show_line_ticks_action.isChecked() and self.sp.get_conf(') and self.axes2 is not None:
elif self.show_line_ticks_action.isChecked() and ( k == 2 ):
self.axes2.set_xlabel(r'Wavelength ($\AA$)')
else:
self.axes.set_xlabel(r'Wavelength ($\AA$)')
self.axes.set_ylabel(r'F$_\lambda$')
self.restore_axes()
# self.update_lim_boxes()
if self.adjust_fig_action.isChecked():
plt.tight_layout(0.1)
self.canvas.draw()
self.statusBar().showMessage('Redraw is finished.', 4000)
log_.debug('Exit on_draw', calling=self.calling)
self.magenta_tick_shown = False
def show_lines_clicked(self):
if self.lineIDs_GroupBox.isChecked():
self.show_line_ticks_action.setChecked(True)
self.plot_lines_action.setChecked(True)
self.sp.set_conf('plot_lines_of_selected_ions', True)
self.set_ion()
else:
self.show_line_ticks_action.setChecked(False)
self.plot_lines_action.setChecked(False)
self.sp.set_conf('plot_lines_of_selected_ions', False)
self.make_axes()
def line_tick_color_clicked(self):
color = QtGui.QColorDialog.getColor()
self.sp.set_conf('line_tick_color', str(color.name()))
if self.show_line_ticks_action.isChecked():
self.make_axes()
def toggle_show_true_ions(self):
self.show_true_ions = not self.show_true_ions
def toggle_legend_clicked(self):
fontsize_list = ['small', 'medium', 'large']
i = fontsize_list.index(self.sp.legend_fontsize) + 1
if i == len(fontsize_list):
self.sp.legend_fontsize = fontsize_list[0]
self.sp.legend_loc = (self.sp.legend_loc)%2+1
else:
self.sp.legend_fontsize = fontsize_list[i]
self.make_axes()
def enable_tooltips_action_clicked(self):
if self.enable_tooltips_action.isChecked():
self.enableToolTips()
self.sp.set_conf('qt_enable_tooltips', True)
log_.debug('Tooltips enabled', calling=self.calling)
else:
self.disableToolTips()
self.sp.set_conf('qt_enable_tooltips', False)
log_.debug('Tooltips disabled', calling=self.calling)
def adjust_fig_action_clicked(self):
if self.adjust_fig_action.isChecked():
self.sp.set_conf('fig_adjust', True)
log_.debug('Adjust figure enabled', calling=self.calling)
else:
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
log_.debug('Adjust figure disabled', calling=self.calling)
self.draw_ion()
def show_uncor_obs_action_clicked(self):
if self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = True
else:
self.sp.show_uncor_spec = False
self.set_plot_limits_and_draw()
def disableToolTips(self):
self.lineIDs_GroupBox.setToolTip('')
self.residual_GroupBox.setToolTip('')
self.run_button.setToolTip('')
self.adjust_button.setToolTip('')
self.line_info_box.setToolTip('')
self.ebv_box.setToolTip('')
self.obj_velo_box.setToolTip('')
self.sp_min_box.setToolTip('')
self.sp_max_box.setToolTip('')
self.xlim_min_box.setToolTip('')
self.xlim_max_box.setToolTip('')
self.y1lim_min_box.setToolTip('')
self.y1lim_max_box.setToolTip('')
self.y3lim_min_box.setToolTip('')
self.y3lim_max_box.setToolTip('')
self.fix_axes_cb.setToolTip('')
self.cut_cb.setToolTip('')
self.ion_cb.setToolTip('')
self.sp_norm_box.setToolTip('')
self.resol_box.setToolTip('')
self.cut2_box.setToolTip('')
self.ion_box.setToolTip('')
self.line_sort_menu.setToolTip('')
self.line_field_menu.setToolTip('')
self.line_tick_ax_menu.setToolTip('')
self.line_tick_pos_menu.setToolTip('')
self.diff_lines_menu.setToolTip('')
self.verbosity_menu.setToolTip('')
self.style_menu.setToolTip('')
def enableToolTips(self):
self.lineIDs_GroupBox.setToolTip(self.lineIDs_GroupBox_ToolTip)
self.residual_GroupBox.setToolTip(self.residual_GroupBox_ToolTip)
self.run_button.setToolTip(self.run_button_ToolTip)
self.adjust_button.setToolTip(self.adjust_button_ToolTip)
self.line_info_box.setToolTip(self.line_info_box_ToolTip)
self.ebv_box.setToolTip(self.ebv_box_ToolTip)
self.obj_velo_box.setToolTip(self.obj_velo_box_ToolTip)
self.sp_min_box.setToolTip(self.sp_min_box_ToolTip)
self.sp_max_box.setToolTip(self.sp_max_box_ToolTip)
self.xlim_min_box.setToolTip(self.xlim_min_box_ToolTip)
self.xlim_max_box.setToolTip(self.xlim_max_box_ToolTip)
self.y1lim_min_box.setToolTip(self.y1lim_min_box_ToolTip)
self.y1lim_max_box.setToolTip(self.y1lim_max_box_ToolTip)
self.y3lim_min_box.setToolTip(self.y3lim_min_box_ToolTip)
self.y3lim_max_box.setToolTip(self.y3lim_max_box_ToolTip)
self.fix_axes_cb.setToolTip(self.fix_axes_cb_ToolTip)
self.cut_cb.setToolTip(self.cut_cb_ToolTip)
self.ion_cb.setToolTip(self.ion_cb_ToolTip)
self.sp_norm_box.setToolTip(self.sp_norm_box_ToolTip)
self.resol_box.setToolTip(self.resol_box_ToolTip)
self.cut2_box.setToolTip(self.cut2_box_ToolTip)
self.ion_box.setToolTip(self.ion_box_ToolTip)
self.line_sort_menu.setToolTip(self.line_sort_menu_ToolTip)
self.line_field_menu.setToolTip(self.line_field_menu_ToolTip)
self.line_tick_ax_menu.setToolTip(self.line_tick_ax_menu_ToolTip)
self.line_tick_pos_menu.setToolTip(self.line_tick_pos_menu_ToolTip)
self.diff_lines_menu.setToolTip(self.diff_lines_menu_ToolTip)
self.verbosity_menu.setToolTip(self.verbosity_menu_ToolTip)
self.style_menu.setToolTip(self.style_menu_ToolTip)
def show_line_ticks_action_clicked(self):
self.set_ion()
if self.plot_lines_action.isChecked():
self.sp.set_conf('plot_lines_of_selected_ions', True)
else:
self.sp.set_conf('plot_lines_of_selected_ions', False)
if self.show_line_ticks_action.isChecked() or self.plot_lines_action.isChecked():
self.lineIDs_GroupBox.setChecked(True)
else:
self.lineIDs_GroupBox.setChecked(False)
self.make_axes()
def plot_cont_action_clicked(self):
if self.plot_cont_action.isChecked():
self.sp.set_conf('cont_plot', True)
else:
self.sp.set_conf('cont_plot', False)
self.on_draw()
def ion_cb_changed(self):
if self.ion_cb.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.selected_ions_action.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.selected_ions_action.setChecked(False)
self.make_axes()
def cut_cb_changed(self):
if self.cut_cb.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.selected_intensities_action.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.selected_intensities_action.setChecked(False)
self.make_axes()
def selected_lines_clicked(self):
if self.selected_ions_action.isChecked():
self.sp.set_conf('show_selected_ions_only', True)
self.ion_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_ions_only', False)
self.ion_cb.setChecked(False)
if self.selected_intensities_action.isChecked():
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
else:
self.sp.set_conf('show_selected_intensities_only', False)
self.cut_cb.setChecked(False)
self.make_axes()
def diff_lines_by_process_clicked(self):
if self.diff_lines_by_process_action.isChecked():
self.sp.set_conf('diff_lines_by_process', True)
else:
self.sp.set_conf('diff_lines_by_process', False)
self.make_axes()
def editing_lines_clicked(self):
if self.editing_lines_action.isChecked():
self.sp.set_conf('qt_allow_editing_lines', True)
else:
self.sp.set_conf('qt_allow_editing_lines', False)
def update_lines_clicked(self):
if self.update_lines_action.isChecked():
self.sp.set_conf('qt_update_after_editing_lines', True)
else:
self.sp.set_conf('qt_update_after_editing_lines', False)
def cycle_forwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(-1, len(self.sp.selected_ions_data)-1):
j += 1
else:
j = -1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
def cycle_backwards_ions(self):
j = self.sp.get_conf('index_of_current_ion')
selected_ions = self.sp.get_conf('selected_ions')
if j in range(0, len(self.sp.selected_ions_data)):
j -= 1
else:
j = len(self.sp.selected_ions_data)-1
self.sp.set_conf('index_of_current_ion', j)
self.set_refline_to_info_box(j)
self.make_axes()
def show_line_ticks_from_file(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
if self.tick_file is None:
path = ''
else:
path = self.tick_file
path = unicode(QtGui.QFileDialog.getOpenFileName(self, 'Open file', path, file_choices))
if path:
self.tick_file = path
else:
return
f = open(self.tick_file, 'r')
lines = f.readlines()
f.close()
color = 'darkmagenta'
posTick = self.sp.get_conf('line_tick_pos')
y1, y2 = self.get_line_tick_lim(posTick)
k = self.sp.get_conf('line_tick_ax')
if k == 2:
k = 1
y1 = 0.2
y2 = 0.8
elif k == 1 and self.residual_GroupBox.isChecked():
k = 1
else:
k = 0
dy = (y2-y1)*0.30
if self.magenta_tick_shown == True:
self.draw_ion()
for line in lines:
line = line.strip()
line = line.split(' ')[0]
if self.isFloat(line):
wavelength = np.float(line)
if wavelength > self.x_plot_lims[0] and wavelength < self.x_plot_lims[1]:
self.fig.axes[k].axvline( wavelength, y1+dy, y2-dy, color = color, linestyle = 'solid', linewidth = 1.5 )
self.fig.axes[k].step( [0,0], [0,100], color = color, linestyle = 'solid', linewidth = 1.5, label = self.tick_file.split('/')[-1] )
self.fig.axes[k].legend(loc=self.sp.legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
self.magenta_tick_shown = True
def residual_box_clicked(self):
if self.residual_GroupBox.isChecked():
self.sp.set_conf('qt_plot_residuals', True)
else:
self.sp.set_conf('qt_plot_residuals', False)
self.make_axes()
def make_axes(self):
log_.debug('Entering make_axes', calling=self.calling)
if self.call_on_draw:
self.save_axes()
self.fig.clf()
i_ax1 = 0
i_ax2 = 1
i_ax3 = 2
rspan_ax1 = 4
rspan_ax2 = 1
rspan_ax3 = 4
n_subplots = rspan_ax1
k = self.sp.get_conf('line_tick_ax')
ShowAx2 = self.show_line_ticks_action.isChecked() and ( k == 2 )
if ShowAx2:
i_ax2 = n_subplots
n_subplots += rspan_ax2
if self.residual_GroupBox.isChecked():
i_ax3 = n_subplots
n_subplots += rspan_ax3
if self.axes is not None:
del(self.axes)
self.axes = plt.subplot2grid((n_subplots,1), (i_ax1,0), rowspan=rspan_ax1)
self.sp.ax1 = self.axes
if ShowAx2:
if self.axes2 is not None:
del(self.axes2)
self.axes2 = plt.subplot2grid((n_subplots,1), (i_ax2,0), rowspan=rspan_ax2, sharex=self.axes )
self.axes2.tick_params( left='off',labelleft='off' )
self.sp.ax2 = self.axes2
self.axes.get_xaxis().set_visible(False)
else:
self.axes2 = None
self.sp.ax2 = None
if self.residual_GroupBox.isChecked():
if self.axes3 is not None:
del(self.axes3)
self.axes3 = plt.subplot2grid((n_subplots,1), (i_ax3,0), rowspan=rspan_ax3, sharex=self.axes )
self.sp.ax3 = self.axes3
if ShowAx2:
self.axes2.get_xaxis().set_visible(False)
self.axes.get_xaxis().set_visible(False)
else:
self.axes3 = None
self.sp.ax3 = self.axes3
self.fig.subplots_adjust(hspace=self.sp.get_conf('fig_hspace'),
bottom=self.sp.get_conf('fig_bottom'),
right=self.sp.get_conf('fig_right'),
top=self.sp.get_conf('fig_top'),
left=self.sp.get_conf('fig_left'))
if self.call_on_draw:
log_.debug('Calling on_draw from make_axes', calling=self.calling)
self.do_save = False
self.on_draw()
self.do_save = True
log_.debug('Exit make_axes', calling=self.calling)
def init_axes(self):
self.x_plot_lims = self.sp.get_conf('x_plot_lims')
if self.x_plot_lims is None:
self.x_plot_lims = (np.min(self.sp.w), np.max(self.sp.w))
self.y1_plot_lims = self.sp.get_conf('y1_plot_lims')
if self.y1_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
a = np.min(self.sp.f[mask])
b = np.max(self.sp.f[mask])
else:
a = np.min(self.sp.sp_synth_lr[mask])
b = np.max(self.sp.sp_synth_lr[mask])
self.y1_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
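# The expression above expands the data range [a, b] symmetrically about its midpoint by
# the factor r, so the plotted span is r*(b-a) (10% padding on each side for r = 1.2);
# the same recipe is used for the y3 (residual) limits below.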
self.y2_plot_lims = self.sp.get_conf('y2_plot_lims')
if self.y2_plot_lims is None:
self.y2_plot_lims = (-0.5, 1.5)
self.y3_plot_lims = self.sp.get_conf('y3_plot_lims')
if self.y3_plot_lims is None:
mask = (self.sp.w_ori > self.x_plot_lims[0]) & (self.sp.w_ori < self.x_plot_lims[1])
r = 1.2
if self.sp.sp_synth_lr is None:
self.y3_plot_lims = (-1,1)
else:
a = np.min((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
b = np.max((self.sp.f_ori - self.sp.sp_synth_lr)[mask])
self.y3_plot_lims = ((a*(1+r)+b*(1-r))/2, (a*(1-r)+b*(1+r))/2)
log_.debug('Axes initialized. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def save_axes(self):
if self.axes is not None:
self.x_plot_lims = self.axes.get_xlim()
self.y1_plot_lims = self.axes.get_ylim()
self.xscale = self.axes.get_xscale()
self.yscale = self.axes.get_yscale()
if self.axes2 is not None:
self.y2_plot_lims = self.axes2.get_ylim()
if self.axes3 is not None:
self.y3_plot_lims = self.axes3.get_ylim()
self.sp.save_axes()
log_.debug('Axes saved. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def restore_axes(self):
if self.x_plot_lims is not None:
if self.axes is not None:
self.axes.set_xlim(self.x_plot_lims)
log_.debug('X-axes restored to {}'.format(self.axes.get_xlim()), calling=self.calling)
else:
log_.debug('axes is None', calling=self.calling)
else:
log_.debug('x_plot_lims is None', calling=self.calling)
if self.y1_plot_lims is not None:
if self.axes is not None:
self.axes.set_ylim(self.y1_plot_lims)
if self.y2_plot_lims is not None:
if self.axes2 is not None:
self.axes2.set_ylim(self.y2_plot_lims)
if self.y3_plot_lims is not None:
if self.axes3 is not None:
self.axes3.set_ylim(self.y3_plot_lims)
if self.xscale is not None:
self.axes.set_xscale(self.xscale)
log_.debug('X scale set to {}'.format(self.xscale))
if self.yscale is not None:
self.axes.set_yscale(self.yscale)
log_.debug('Y scale set to {}'.format(self.yscale))
log_.debug('Axes restored. IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
self.print_axes()
def print_axes(self):
log_.debug('lims: {} {} {} {}'.format(self.x_plot_lims, self.y1_plot_lims, self.y2_plot_lims, self.y3_plot_lims), calling=self.calling)
log_.debug('Axes IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
log_.debug(' IDs {} {} {}'.format(id(self.axes), id(self.axes2), id(self.axes3)), calling=self.calling)
def exec_init(self):
if self.init_file_name is None:
self.get_init_filename()
if self.init_file_name:
self.statusBar().showMessage('Running synthesis ...')
QtGui.QApplication.processEvents()
self.start_spectrum()
self.do_save = False
self.on_draw()
self.do_save = True
self.restore_axes()
self.update_lim_boxes()
self.save_parameters_file = None
else:
log_.warn('A filename must be given', calling=self.calling)
sys.exit('An initialization filename must be given')
def get_init_filename(self):
file_choices = "Python initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open pySSN initialization file'
init_file = str(QtGui.QFileDialog.getOpenFileName(self, title, self.init_file_name, file_choices))
if init_file and os.path.isfile(init_file):
self.init_file_name = init_file
else:
self.init_file_name = ''
def select_init(self):
old_name = self.init_file_name
self.get_init_filename()
if self.init_file_name:
self.exec_init()
else:
self.init_file_name = old_name
def save_pars(self):
path = self.sp.get_conf('save_parameters_filename')
keys = self.sp.default_keys
if '__builtins__' in keys:
keys.remove('__builtins__')
keys.sort()
with open(path, 'w') as f:
for key in keys:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def save_pars_as(self):
if self.save_parameters_file is None:
path = self.init_file_name
else:
path = self.save_parameters_file
keys = self.sp.default_keys
keys_to_be_removed = ['__builtins__', 'plot_magenta', 'label_magenta', 'plot_cyan', 'label_cyan']
for key in keys_to_be_removed:
if key in keys:
keys.remove(key)
keys.sort()
file_choices = "pySSN initialization files (*init.py) (*init.py);;Python files (*.py) (*.py);;All files (*) (*)"
title = 'Save synthesis and plot parameters'
selectedFilter = 'pySSN initialization files (*init.py) (*init.py)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, title, path, file_choices, selectedFilter))
if path:
with open(path, 'w') as f:
for key in keys:
if key == 'instr_prof':
value = self.sp.format_instr_prof()
else:
value = self.sp.conf[key]
if isinstance(value, basestring):
value = '\"{}\"'.format(value)
f.write('{} = {}\n'.format(key, value))
self.save_parameters_file = path
self.statusBar().showMessage('Parameters saved to file %s' % path, 4000)
def teste_instr_prof(self, prof):
if prof is None:
return 'not defined'
keys = [key for key in prof.keys() if key != 'comment']
if not 'width' in keys:
return 'The parameter \'width\' was not found.'
if prof['width'] == 0.0:
return 'The value of \'width\' cannot be zero.'
if not (self.sp.get_key_indexes('Bb', prof)==self.sp.get_key_indexes('Br', prof)==
self.sp.get_key_indexes('beta', prof)==self.sp.get_key_indexes('alpha', prof)):
return 'Invalid indexes for the parameters \'Bb\', \'Br\', \'alpha\', or \'beta\'.'
if not all((type(prof[key])==float or type(prof[key])==int) for key in keys):
return 'The values of parameters must be numbers.'
return ''
def apply_instr_prof(self):
def do_update():
path = str(prof_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
prof = user_module['instr_prof']
self.sp.set_conf('instr_prof', prof)
log_.message('new instrumental profile is ok', calling = self.calling)
except:
title = 'Error reading instrument profile'
msg = 'Unable to read instrumental profile'
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
msg = self.teste_instr_prof(prof)
if not msg:
self.update_profile()
else:
title = 'Error in the instrument profile'
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def toggle_statusbar():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, 2.1*self.instr_prof_dialog_height)
else:
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
def get_window_size_and_position():
if self.instr_prof_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.instr_prof_dialog_width = width
self.instr_prof_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.instr_prof_dialog_x = sG.width()-self.instr_prof_dialog_width
self.instr_prof_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.instr_prof_dialog_width = self.instr_prof_dialog.width()
self.instr_prof_dialog_height = self.instr_prof_dialog.height()
self.instr_prof_dialog_x = self.instr_prof_dialog.pos().x()
self.instr_prof_dialog_y = self.instr_prof_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.instr_prof_dialog = QtGui.QDialog()
self.instr_prof_dialog.setWindowFlags(self.instr_prof_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.instr_prof_dialog.resize(self.instr_prof_dialog_width, self.instr_prof_dialog_height)
self.instr_prof_dialog.move(self.instr_prof_dialog_x,self.instr_prof_dialog_y)
self.instr_prof_dialog.setWindowTitle('instrument profile dialog')
prof_box = QtGui.QTextEdit()
prof_box.setFontFamily("Courier")
prof_box.setText('instr_prof = ' + self.sp.format_instr_prof())
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('instr_prof.html').read()
# This text should go to a file open with text=open('instr_prof.html').read()
text = """<title> Instrumental profile help</title>
<p>The instrumental profile is defined by the <a href="https://en.wikibooks.org/wiki/Python_Programming/Dictionaries">python dictionary</a> <b>instr_prof</b>.
<p>The main component of the instrumental profile is set by the parameter <b>width</b>, which is the only indispensable parameter.</p>
<p>If <b>width</b> > 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>, P ∝ exp(-(λ/<b>width</b>)<sup>2</sup>).
In this case, <b>width</b> is related to the normal full-width at half maximum by <b>width</b> = FWHM/(2(ln2)<sup>1/2</sup>) = FWHM/1.665.</p>
<p>If <b>width</b> < 0, the main component profile follows a <a href="https://en.wikipedia.org/wiki/rectangular_distribution">rectangular distribution</a>, P = 1 for -|<b>width</b>|/2 < λ < |<b>width</b>|/2, and P = 0 for all other values of λ.</p>
<p>A variable number of optional components can be included, each defined by four parameters, <b>Bb</b>, <b>Br</b>, <b>alpha</b>, and <b>beta</b>, and following P ∝ <b>B</b>exp(-(λ/<b>beta</b>)<sup><b>alpha</b></sup>).
<b>Bb</b> and <b>Br</b> are the intensity scale parameters for the bluish and reddish sides of the profile, respectively.</p>
<p>If more than one optional component is in use, the parameters must be indexed as <b>alpha_1</b> <b>alpha_2</b>, etc.</p>
Special cases for the optional components:
<ul>
<li><b>alpha</b> = 2 produces a <a href="https://en.wikipedia.org/wiki/Normal_distribution">Gaussian distribution</a>.
<li><b>alpha</b> = 2, <b>Bb</b> = 0 (or <b>Br</b> = 0) produces a <a href="https://en.wikipedia.org/wiki/Half_normal_distribution">half-Gaussian distribution</a>.
<li><b>alpha</b> = 1 produces an <a href="https://en.wikipedia.org/wiki/Exponential_distribution">exponential distribution</a>.
</ul>
<p>A comment may be included in <b>instr_prof</b>.</p>
<p>Examples:</p>
<ol>
<li>instr_prof = {'width': 0.5}<br>
<li>instr_prof = {'width': 0.5, 'comment': 'Gaussian profile'}<br>
<li>instr_prof = {'width': 0.5, 'Bb':0.00016, 'Br':9e-05, 'beta': 2.2, 'alpha': 0.45}<br>
<li>instr_prof = {'width': 0.5, 'Bb_1':0.00016, 'Br_1':9e-05, 'beta_1': 2.2, 'alpha_1': 0.45, 'Bb_2': 0.0014, 'Br_2':0.001, 'beta_2': 1.4, 'alpha_2': 0.75}<br>
</ol>"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
vbox.addWidget(prof_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_statusbar)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.instr_prof_dialog.close)
self.instr_prof_dialog.setLayout(vbox)
self.instr_prof_dialog.setWindowModality(QtCore.Qt.NonModal)
self.instr_prof_dialog.show()
def refine_wavelengths(self):
def table2list(text):
text = str(text)
text = text.splitlines()
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
return 'lambda_shift_table = None'
else:
return 'lambda_shift_table = [{}]'.format(s)
def toggle_table():
self.refine_wave_as_table = not self.refine_wave_as_table
if self.refine_wave_as_table:
text = str(edit_box.toPlainText()).strip()
edit_box.clear()
text = text.replace('lambda_shift_table','')
text = text.strip(' =[]')
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
edit_box.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
text = table2list(edit_box.toPlainText())
if text == '':
self.refine_wave_as_table = True
return
edit_box.clear()
edit_box.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def do_update():
old_value = self.sp.get_conf('lambda_shift_table')
if self.refine_wave_as_table:
path = table2list(edit_box.toPlainText())
if path == '':
return
else:
path = str(edit_box.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
value = user_module['lambda_shift_table']
self.sp.set_conf('lambda_shift_table', value)
log_.message('new \'lambda_shift_table\' is ok', calling = self.calling)
except:
title = 'Error'
msg = 'Unable to read \'lambda_shift_table\''
path = None
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
self.sp.show_uncor_spec = True
self.sp.init_obs()
if self.sp.read_obs_error:
self.sp.set_conf('lambda_shift_table', old_value)
if self.showErrorBox:
title = 'Error'
msg = self.sp.read_obs_error
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
else:
self.rerun()
if not self.show_uncor_obs_action.isChecked():
self.sp.show_uncor_spec = False
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, 2.5*self.refine_wave_dialog_height)
else:
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
def get_window_size_and_position():
if self.refine_wave_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.refine_wave_dialog_width = width
self.refine_wave_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.refine_wave_dialog_x = sG.width()-self.refine_wave_dialog_width
self.refine_wave_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.refine_wave_dialog_width = self.refine_wave_dialog.width()
self.refine_wave_dialog_height = self.refine_wave_dialog.height()
self.refine_wave_dialog_x = self.refine_wave_dialog.pos().x()
self.refine_wave_dialog_y = self.refine_wave_dialog.pos().y()
self.showHelpBrowser = False
get_window_size_and_position()
self.refine_wave_dialog = QtGui.QDialog()
self.refine_wave_dialog.setWindowFlags(self.refine_wave_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
self.refine_wave_dialog.resize(self.refine_wave_dialog_width, self.refine_wave_dialog_height)
self.refine_wave_dialog.move(self.refine_wave_dialog_x,self.refine_wave_dialog_y)
self.refine_wave_dialog.setWindowTitle('wavelength-refining dialog')
edit_box = QtGui.QTextEdit()
edit_box.setFontFamily("Courier")
self.refine_wave_as_table = False
edit_box.setText('lambda_shift_table = ' + str(self.sp.get_conf('lambda_shift_table')))
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('wave_refining.html').read()
# This text should go to a file open with text=open('wave-refining').read()
text = """<title> Wavelength-refining help</title>
<p>The wavelength calibration of the observational spectrum can be refined with the use of
the <a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>lambda_shift_table</b>.
Each element of this list is an ordered pair of numbers (λ, Δλ), where Δλ is the wavelength shift at the wavelength λ needed to improve the calibration, after the Doppler correction.</p>
<p>The data in <b>lambda_shift_table</b> will be linearly interpolated to provide the corrected wavelengths.
Outside the range of wavelengths given in <b>lambda_shift_table</b>, the correction will be extrapolated to zero.</p>
<p>To set aside the wavelength-refining, set <b>lambda_shift_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>lambda_shift_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]</p></li>
<li><p>lambda_shift_table = None (to set aside the wavelength-refining)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>lambda_shift_table</b> as a two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>lambda_shift_table</b> list from the two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to refine the wavelength calibration and redo the synthesis.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
vbox.addWidget(edit_box,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.rejected.connect(self.refine_wave_dialog.close)
self.refine_wave_dialog.setLayout(vbox)
self.refine_wave_dialog.setWindowModality(QtCore.Qt.NonModal)
self.refine_wave_dialog.show()
def plot_user_cont(self):
self.fig.axes[0].step( [0,0], [0,100], color = color, linestyle = 'solid', label = label, linewidth = 2.5 )
self.fig.axes[0].legend(loc=current_legend_loc, fontsize=self.sp.legend_fontsize)
self.fig.canvas.draw()
def user_cont_table2list(self, text):
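# Convert the two-column text of the edit box into the 'cont_user_func'/'cont_user_table'
# assignment string. Example (hypothetical values): the text "4674 0.05\n4690 0.1" with
# cont_user_func = 'linear' yields
# "cont_user_func = 'linear'\n\ncont_user_table = [(4674, 0.05), (4690, 0.1)]".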
text = str(text)
text = text.splitlines()
text = sorted(text)
s = ''
for i in range(len(text)):
line = text[i].split()
if len(line) == 2 and sum([self.isFloat(x) for x in line]) == 2:
s += '({}, {}), '.format(line[0], line[1])
else:
if len(line) > 0:
title = 'Error in table'
msg = 'Error in line \'{}\'.\nEach line must have two numbers separated by whitespace.'.format(text[i])
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return ''
s = s.strip(' ,')
if s == '':
s = 'None'
else:
s = '[{}]'.format(s)
return 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(self.sp.get_conf('cont_user_func'), s)
def update_user_cont(self):
msg = ''
old_value = self.sp.get_conf('cont_user_table')
old_kind = self.sp.get_conf('cont_user_func')
if self.interpol_cont_as_table:
path = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
if path == '':
return
else:
path = str(self.user_cont_editBox.toPlainText()).strip()
try:
user_module = {}
exec(path) in user_module
kind = user_module['cont_user_func']
log_.message('new \'cont_user_func\' is ok', calling = self.calling)
value = user_module['cont_user_table']
log_.message('new \'cont_user_table\' is ok', calling = self.calling)
except:
msg = 'Unable to read \'cont_user_func\' or \'cont_user_table\''
path = None
kinds = {'nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'}
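# Allowed interpolation kinds; these match the 'kind' argument of
# scipy.interpolate.interp1d, which is assumed to be used in sp.update_user_cont().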
if msg == '':
if kind not in kinds:
msg = 'Invalid function'
if msg != '':
title = 'Error'
msg = 'Problem in user-defined continuum interpolation.\n{}'.format(msg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
if old_value != value or old_kind != kind:
self.cont_par_changed = True
if value is not None and len(value) == 0:
value = None
self.sp.set_conf('cont_user_table', value)
self.sp.set_conf('cont_user_func', kind)
self.sp.update_user_cont()
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
else:
self.set_plot_limits_and_draw()
def user_cont_list2table(self, points):
self.user_cont_editBox.clear()
for point in points:
line = '{:<7} {}'.format(str(point[0]).strip(),str(point[1]).strip())
self.user_cont_editBox.append(line)
def user_continuum(self):
def save_initial_plot_pars():
self.init_cont_line_num = self.line_info_box.text()
self.init_cont_ion = self.ion_box.text()
self.init_cont_xmin = self.xlim_min_box.text()
self.init_cont_xmax = self.xlim_max_box.text()
self.init_cont_y1min = self.y1lim_min_box.text()
self.init_cont_y1max = self.y1lim_max_box.text()
self.init_cont_y3min = self.y3lim_min_box.text()
self.init_cont_y3max = self.y3lim_max_box.text()
self.init_cont_legend_fontsize = self.sp.legend_fontsize
self.init_cont_legend_loc = self.sp.legend_loc
self.init_cont_sel_ions_only = self.selected_ions_action.isChecked()
def redo_initial_plot():
self.line_info_box.setText(self.init_cont_line_num)
self.ion_box.setText(self.init_cont_ion)
self.xlim_min_box.setText(self.init_cont_xmin)
self.xlim_max_box.setText(self.init_cont_xmax)
self.y1lim_min_box.setText(self.init_cont_y1min)
self.y1lim_max_box.setText(self.init_cont_y1max)
self.y3lim_min_box.setText(self.init_cont_y3min)
self.y3lim_max_box.setText(self.init_cont_y3max)
self.sp.legend_fontsize = self.init_cont_legend_fontsize
self.sp.legend_loc = self.init_cont_legend_loc
self.selected_ions_action.setChecked(self.init_cont_sel_ions_only)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
def toggle_table():
self.interpol_cont_as_table = not self.interpol_cont_as_table
if self.interpol_cont_as_table:
text = str(self.user_cont_editBox.toPlainText()).strip()
text = text[text.find('[')+1:text.find(']')]
text = text.replace('\n','')
self.user_cont_editBox.clear()
text = text.split(')')
for i in range(len(text)-1):
line = text[i].strip(' (,')
line = line.split(',')
line = '{:<7} {}'.format(line[0].strip(),line[1].strip())
self.user_cont_editBox.append(line)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as list")
else:
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.on_draw()
text = self.user_cont_table2list(self.user_cont_editBox.toPlainText())
if text == '':
self.interpol_cont_as_table = True
return
self.user_cont_editBox.clear()
self.user_cont_editBox.setText(text)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
def toggle_help():
self.showHelpBrowser = not self.showHelpBrowser
helpBrowser.setVisible(self.showHelpBrowser)
if self.showHelpBrowser:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, 2.5*self.interpol_cont_dialog_height)
else:
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
def get_window_size_and_position():
if self.interpol_cont_dialog is None:
font = QtGui.QFont("Courier")
width = QtGui.QFontMetrics(font).width('='*80)
height = 15*QtGui.QFontMetrics(font).height()
self.interpol_cont_dialog_width = width
self.interpol_cont_dialog_height = height
sG = QtGui.QApplication.desktop().screenGeometry()
self.interpol_cont_dialog_x = sG.width()-self.interpol_cont_dialog_width
self.interpol_cont_dialog_y = sG.height()
else:
if not self.showHelpBrowser:
self.interpol_cont_dialog_width = self.interpol_cont_dialog.width()
self.interpol_cont_dialog_height = self.interpol_cont_dialog.height()
self.interpol_cont_dialog_x = self.interpol_cont_dialog.pos().x()
self.interpol_cont_dialog_y = self.interpol_cont_dialog.pos().y()
def get_points():
self.get_user_cont_points = not self.get_user_cont_points
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
if self.get_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
def del_points():
self.del_user_cont_points = not self.del_user_cont_points
self.get_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
if self.del_user_cont_points:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('background-color:red;')
self.set_plot_limits_and_draw()
self.sp.plot_conts(self.axes, ['user'])
self.canvas.draw()
if self.interpol_cont_as_table == False:
toggle_table()
else:
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
def on_close():
redo_initial_plot()
self.interpol_cont_dialog.close()
def do_update():
self.get_user_cont_points = False
self.del_user_cont_points = False
buttonBox.button(QtGui.QDialogButtonBox.Retry).setStyleSheet('')
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setStyleSheet('')
self.update_user_cont()
self.showHelpBrowser = False
get_window_size_and_position()
save_initial_plot_pars()
self.ion_box.setText('')
self.selected_ions_action.setChecked(True)
self.selected_lines_clicked()
self.set_plot_limits_and_draw()
self.interpol_cont_dialog = QtGui.QDialog()
self.interpol_cont_dialog.setWindowFlags(self.interpol_cont_dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
#self.interpol_cont_dialog.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint | QtCore.Qt.WindowStaysOnTopHint)
self.interpol_cont_dialog.resize(self.interpol_cont_dialog_width, self.interpol_cont_dialog_height)
self.interpol_cont_dialog.move(self.interpol_cont_dialog_x,self.interpol_cont_dialog_y)
self.interpol_cont_dialog.setWindowTitle('user-defined continuum dialog')
self.user_cont_editBox = QtGui.QTextEdit()
self.user_cont_editBox.setFontFamily("Courier")
self.interpol_cont_as_table = False
self.get_user_cont_points = False
self.del_user_cont_points = False
text = 'cont_user_func = \'{}\'\n\ncont_user_table = {}'.format(str(self.sp.get_conf('cont_user_func')), self.sp.get_conf('cont_user_table'))
self.user_cont_editBox.setText(text)
linkLabel = QtGui.QLabel('<a href="https://github.com/Morisset/pySSN/wiki">More help online</a>')
linkLabel.setOpenExternalLinks(True)
helpBrowser = QtGui.QTextBrowser()
# text=open('user_continuum.html').read()
# This text should go to a file open with text=open('user_continuum').read()
text = """<title> User-defined continuum help</title>
<p>A user-defined continuum can be added to the continuum calculated from other sources (electron recombination, free-free transitions, two-photon, black-body and
power-law emission). It is obtained by the interpolation of the data contained in the
<a href="https://en.wikibooks.org/wiki/Python_Programming/Lists">python list</a> <b>cont_user_table</b>. Each element of this list is an ordered pair of numbers
(λ, <i>f</i>), where <i>f</i> is the additional continuum flux at the wavelength λ.</p>
<p>The parameter <b>cont_user_func</b> defines the kind of interpolation. Possible values include 'linear', 'quadratic', and 'cubic', corresponding to linear
interpolation, and second- and third-order spline interpolation, respectively. Outside the range of wavelengths given in <b>cont_user_table</b>, the user continuum
component will be extrapolated to zero.</p>
<p>There are three modes of editing the interpolation control points: editing the list <b>cont_user_table</b> directly or as a two-column table, or clicking
with the mouse on the figure at the intended level of total continuum (see Button functions below). To set aside the user-defined continuum, set
<b>cont_user_table</b> to None.</p>
<p>Examples:</p>
<ol>
<li><p>cont_user_func = 'linear'<br>
cont_user_table = [(4674, 0.05), (4690, 0.1), (9000, 1)]
</p></li>
<li><p>cont_user_table = None (to set aside the user-defined continuum)</p></li>
</ol>
<p>Button functions:</p>
<ul>
<li><p>Click on <b><span style="color:red">Show as table</span></b> to display and edit the data contained in <b>cont_user_table</b> as a two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Show as list</span></b> to get back the <b>cont_user_table</b> list from the two-column table.</p></li>
<li><p>Click on <b><span style="color:red">Add points</span></b> to activate/deactivate the mode that allows new control points to be added by mouse-clicking on the
figure. Each time a new control point is included, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Del points</span></b> to activate/deactivate the mode that allows clicking on the figure to delete the nearest
(in wavelength) control point. Each time a control point is deleted, the interpolation is automatically updated.</p></li>
<li><p>Click on <b><span style="color:red">Update</span></b> to incorporate the changes in the user-defined continuum.</p></li>
<li><p>Click on <b><span style="color:red">Close</span></b> to close the dialog and return to the preceding plot setting.</p></li>
</ul>
"""
helpBrowser.document().setHtml(text)
helpBrowser.setOpenExternalLinks(True)
helpBrowser.setVisible(self.showHelpBrowser)
policy = helpBrowser.sizePolicy()
policy.setVerticalStretch(20)
helpBrowser.setSizePolicy(policy)
vbox = QtGui.QVBoxLayout()
buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Help|
QtGui.QDialogButtonBox.RestoreDefaults|
QtGui.QDialogButtonBox.Retry|
QtGui.QDialogButtonBox.Ignore|
QtGui.QDialogButtonBox.Close|
QtGui.QDialogButtonBox.Apply)
buttonBox.button(QtGui.QDialogButtonBox.Apply).setText("Update")
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).setText("Show as table")
buttonBox.button(QtGui.QDialogButtonBox.Retry).setText("Add points")
buttonBox.button(QtGui.QDialogButtonBox.Ignore).setText("Del points")
vbox.addWidget(self.user_cont_editBox,0)
vbox.addWidget(buttonBox)
vbox.addWidget(linkLabel)
vbox.addWidget(helpBrowser)
buttonBox.button(QtGui.QDialogButtonBox.Help).clicked.connect(toggle_help)
buttonBox.button(QtGui.QDialogButtonBox.RestoreDefaults).clicked.connect(toggle_table)
buttonBox.button(QtGui.QDialogButtonBox.Apply).clicked.connect(do_update)
buttonBox.button(QtGui.QDialogButtonBox.Retry).clicked.connect(get_points)
buttonBox.button(QtGui.QDialogButtonBox.Ignore).clicked.connect(del_points)
buttonBox.rejected.connect(on_close)
#self.interpol_cont_dialog.onCloseEvet(on_close)
self.interpol_cont_dialog.setLayout(vbox)
self.interpol_cont_dialog.setWindowModality(QtCore.Qt.NonModal)
self.interpol_cont_dialog.show()
def isValidFilename(self, filename):
if filename is None:
return False
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
try:
f = open(filename, 'w')
f.close()
return True
except IOError:
return False
def set_cosmetic_file(self):
file_choices = "Line cosmetic files (*cosm*.dat) (*cosm*.dat);;Data files (*.dat) (*.dat);;All files (*) (*)"
title = 'Set the line cosmetic file'
cosmetic_file = str(QtGui.QFileDialog.getSaveFileName(self, title, '', file_choices, options=QtGui.QFileDialog.DontConfirmOverwrite))
msg = "Line cosmetic file '{}' not valid!".format(cosmetic_file)
if cosmetic_file and not self.isValidFilename(cosmetic_file):
QtGui.QMessageBox.critical(self, 'pySSN', msg, QtGui.QMessageBox.Ok )
cosmetic_file = None
if cosmetic_file:
self.sp.set_conf('do_cosmetik', True)
dir_ = os.path.dirname(cosmetic_file)
if dir_ == os.getcwd():
cosmetic_file = cosmetic_file.split('/')[-1]
self.sp.set_conf('fic_cosmetik', cosmetic_file)
self.sp.fic_cosmetik = cosmetic_file
if self.sp is not None:
self.set_status_text()
if self.axes is not None:
self.adjust()
def empty_cosmetic_file(self):
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
title = 'pySSN: cosmetic file'
msg = 'All lines in the cosmetic file will be removed.\nConfirm?'
ret = QtGui.QMessageBox.question(self, title, msg, QtGui.QMessageBox.Ok, QtGui.QMessageBox.Cancel )
if ret == QtGui.QMessageBox.Ok:
f = open(self.sp.fic_cosmetik, 'w')
f.close()
def order_lines(self, lines):
if lines is None:
return None
numbers = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
numbers.append(line_num)
lines = [x for _,x in sorted(zip(numbers, lines))]
return lines
def remove_duplicate_lines(self, lines):
if lines is None:
return None
numbers = []
output = []
for line in lines:
line_num = int(self.sp.fieldStrFromLine(line,'num'))
if line_num not in numbers:
numbers.append(line_num)
output.append(line)
return output
def order_cosmetic_file(self):
if self.sp.fic_cosmetik is None or not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
cosmetic_lines = self.order_lines(cosmetic_lines)
n0 = len(cosmetic_lines)
cosmetic_lines = self.remove_duplicate_lines(cosmetic_lines)
n1 = len(cosmetic_lines)
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(cosmetic_lines)
f.close()
if n0 > n1:
s = ' and the duplicate lines removed'
else:
s = ''
msg = 'The cosmetic file \'{0:}\' was ordered{1:}.'.format(self.sp.fic_cosmetik, s)
self.statusBar().showMessage(msg, 4000)
def clean_cosmetic_file(self):
def ShowCleanMessage(UnchangedLineList):
nUL = len(UnchangedLineList)
if nUL == 1:
s1 = ''
s2 = 'was'
s3 = 'this line'
elif nUL > 1:
s1 = 's'
s2 = 'were'
s3 = 'these lines'
msgBox = QtGui.QMessageBox()
msgBox.setIcon(QtGui.QMessageBox.Question)
msgBox.setWindowTitle('pySSN: cosmetic file')
msg = '{0:} unchanged line{1:} {2:} found in the cosmetic file.'.format(nUL, s1, s2)
msgBox.setText(msg)
msgBox.setInformativeText('Do you want to delete {:}?\n'.format(s3))
detailedText = 'Unchanged line{:}:\n\n'.format(s1)
for i in UnchangedLineList:
detailedText = detailedText + str(i) + '\n'
msgBox.setDetailedText(detailedText)
DelButton = msgBox.addButton(self.tr("Delete"), QtGui.QMessageBox.ActionRole)
s = 'Delete from the cosmetic file all unchanged lines'
if self.enable_tooltips_action.isChecked():
DelButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelButton:
answer = True
else:
answer = False
return answer
if self.sp.fic_cosmetik is None or self.sp.phyat_file is None:
return
#if not self.sp.get_conf('clean_cosmetic_file'):
# return
if not os.path.isfile(self.sp.fic_cosmetik):
return
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
UnchangedLineList = []
ChangedLines = []
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
if self.sp.cosmetic_line_unchanged(line_c):
UnchangedLineList.append(line_num)
else:
ChangedLines.append(line_c + '\n')
if len(UnchangedLineList) > 0:
ret = ShowCleanMessage(UnchangedLineList)
if ret == True:
f = open(self.sp.fic_cosmetik, 'w')
f.writelines(ChangedLines)
f.close()
else:
msg = 'No unchanged line in the cosmetic file {:}'.format(self.sp.fic_cosmetik)
self.statusBar().showMessage(msg, 4000)
def match_cosmetic_phyat_files(self):
def ShowErrorMessage():
msg = 'The wavelength or intensity in the cosmetic file does not match that in the atomic database.\n\n' \
'Do you want to try to automatically correct the cosmetic file?'
msgBox = QtGui.QMessageBox()
msgBox.setText("Error in cosmetic file for line: " + str(line_num))
msgBox.setInformativeText(msg)
msgBox.addButton(QtGui.QMessageBox.Yes)
msgBox.addButton(QtGui.QMessageBox.YesToAll)
msgBox.addButton(QtGui.QMessageBox.No)
msgBox.addButton(QtGui.QMessageBox.NoToAll)
msgBox.setDefaultButton(QtGui.QMessageBox.Yes)
answer = msgBox.exec_()
return answer
def ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound):
msgBox = QtGui.QMessageBox()
msgBox.setText('pySSN: error in cosmetic file')
if nCor > 0:
s0 = 'Rerun the synthesis to take into account the changes.\n\n'
else:
s0 = ''
if nUnCor > 0:
s1 = 'The cosmetic data for lines that still have problems will be ignored. ' \
'Do you want to delete them from the cosmetic file?'
else:
s1 = ''
msg = 'Number of lines with problems: {0:}\n' \
'Number of corrected lines: {1:}\n' \
'Number of uncorrected lines: {2:}\n' \
'Number of lines not found in the atomic database: {3:}\n\n' \
'{4:}{5:}'.format(nErr, nCor, nUnCor, nNfd, s0, s1)
msgBox.setInformativeText(msg)
if nNfd > 0:
detailedText = 'Lines not found:\n\n'
for i in NotFound:
detailedText = detailedText + i + '\n'
detailedText = detailedText + '\n'
else:
detailedText = ''
if nUnCor > 0:
detailedText = detailedText + 'Lines not corrected:\n\n'
for i in UnCorList:
detailedText = detailedText + i + '\n'
msgBox.setDetailedText(detailedText)
DelAllButton = msgBox.addButton(self.tr("Delete all"), QtGui.QMessageBox.ActionRole)
DelNotFndButton = msgBox.addButton(self.tr("Delete not found"), QtGui.QMessageBox.ActionRole)
DelUncorButton = msgBox.addButton(self.tr("Delete uncorrected"), QtGui.QMessageBox.ActionRole)
if self.enable_tooltips_action.isChecked():
s = 'Delete from the cosmetic file all lines that still have problems'
DelAllButton.setToolTip(s)
s = 'Delete from the cosmetic file the lines not found in the atomic database'
DelNotFndButton.setToolTip(s)
s = 'Delete from the cosmetic file the uncorrected lines'
DelUncorButton.setToolTip(s)
msgBox.addButton(QtGui.QMessageBox.Cancel)
msgBox.setMaximumHeight(16777215)
msgBox.setMinimumHeight(800)
# It does not expand! Why?
msgBox.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
msgBox.setSizeGripEnabled(True)
if nUnCor == 0:
DelUncorButton.setEnabled(False)
DelAllButton.setEnabled(False)
if nNfd == 0:
DelNotFndButton.setEnabled(False)
DelAllButton.setEnabled(False)
answer = msgBox.exec_()
if msgBox.clickedButton() == DelAllButton:
answer = ['DelNotFnd', 'DelUncor']
elif msgBox.clickedButton() == DelNotFndButton:
answer = ['DelNotFnd']
elif msgBox.clickedButton() == DelUncorButton:
answer = ['DelUncor']
else:
answer = []
return answer
if self.sp.fic_cosmetik is None:
return
if os.path.isfile(self.sp.fic_cosmetik):
cosmetik_arr, errorMsg = self.sp.read_cosmetik()
if len(errorMsg) > 0:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
title = 'Error in cosmetic file: '
msg = 'Unable to read cosmetic data from file \'{}\':{}\n\nLine cosmetics will be disabled!'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return
ret = None
f = open(self.sp.fic_cosmetik, 'r')
cosmetic_lines = f.readlines()
f.close()
ErrorList = []
CorrectedList = []
UnCorList = []
NotFound =[]
k = self.sp.field_pos['id']
keys = [ 'lambda', 'l_shift', 'i_rel', 'i_cor' ]
for i in range(len(cosmetic_lines)):
line_c = cosmetic_lines[i].rstrip()
line_num = int(self.sp.fieldStrFromLine(line_c,'num'))
cosmeticLineOk = self.sp.cosmetic_line_ok(line_c)
if cosmeticLineOk == None:
NotFound.append(line_c[:k])
ErrorList.append(line_c[:k])
elif cosmeticLineOk == False:
ErrorList.append(line_c[:k])
if ret != QtGui.QMessageBox.YesToAll and ret != QtGui.QMessageBox.NoToAll:
ret = ShowErrorMessage()
if ret == QtGui.QMessageBox.Yes or ret == QtGui.QMessageBox.YesToAll:
CorrectedList.append(line_c[:k])
line = self.sp.read_line(self.sp.phyat_file, line_num)
line = line.rstrip()
v0 = {i: np.float(self.sp.fieldStrFromLine(line, i)) for i in keys}
v1 = {i: np.float(self.sp.fieldStrFromLine(line_c, i)) for i in keys}
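# Rescale the cosmetic entry against the updated atomic data: the new l_shift preserves
# the total observed wavelength (lambda + l_shift), and the new i_cor preserves the
# product i_rel * i_cor, so the synthesis is unchanged by the database update.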
l_shift = v1['lambda'] + v1['l_shift'] - v0['lambda']
i_cor = v1['i_cor'] * v1['i_rel'] / v0['i_rel']
l_shift_str = self.rightFormat(str(l_shift), 'l_shift')
i_cor_str = self.rightFormat(str(i_cor), 'i_cor')
line = self.sp.replace_field(line, 'l_shift', l_shift_str)
line = self.sp.replace_field(line, 'i_cor', i_cor_str)
log_.warn('(corrected) ' + line + '\n', calling=self.calling)
self.sp.replace_line(self.sp.fic_cosmetik, line)
else:
UnCorList.append(line_c[:k])
log_.warn('Not corrected.\n', calling=self.calling)
nErr = len(ErrorList)
nCor = len(CorrectedList)
nUnCor = len(UnCorList)
nNfd = len(NotFound)
if nErr > 0:
answer = ShowFinalMessage(nErr, nCor, nUnCor, nNfd, UnCorList, NotFound)
if 'DelNotFnd' in answer:
for i in NotFound:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
if 'DelUncor' in answer:
for i in UnCorList:
self.sp.remove_line(self.sp.fic_cosmetik, int(i))
def set_status_text(self):
if self.sp is None:
return
if self.sp.phyat_file == 'NO_phyat.dat':
self.status_text.setText('pySSN, v {}. init file: {}, No synthesis'.format(__version__,
self.sp.config_file.split('/')[-1]))
elif self.sp.get_conf('do_cosmetik'):
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, cosmetic: {}'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1],
self.sp.get_conf('fic_cosmetik').split('/')[-1]))
else:
self.status_text.setText('pySSN, v {}. init file: {}, at. data: {}, model: {}, No cosmetic'.format(__version__,
self.sp.config_file.split('/')[-1],
self.sp.phyat_file.split('/')[-1],
self.sp.get_conf('fic_modele').split('/')[-1]))
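# test_init_file: the init file is parsed as plain Python, one logical command per entry
# (indented lines and triple-quoted blocks are treated as continuations). Each command is
# exec'd only to detect syntax/indentation/name errors before the real configuration is
# loaded by spectrum(config_file=...). A hypothetical init file could contain, e.g.:
# limit_sp = (4500., 7000.)
# obj_velo = 25.0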
def test_init_file(self):
if self.sp == None:
self.showErrorBox = False
self.showErrorBox = True
invalidCommands = []
if os.path.isfile(self.init_file_name):
f = open(self.init_file_name, 'r')
lines = f.readlines()
f.close()
else:
invalidCommands.append('\nFile not found')
lines = []
triple_quoted_string_found = False
newlines = []
rows = []
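# Build the list of logical commands: a line that starts at column 0 (and is not inside a
# triple-quoted string) opens a new command; indented lines and triple-quoted bodies are
# appended to the previous one. `rows` records the starting line number of each command
# so that error messages can point back to the init file.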
for i in range(len(lines)):
line = lines[i].split('#')[0].rstrip()
k = line.find('=')
if not (line.strip().startswith('#') or len(line.strip()) == 0):
if '"""' in line:
triple_quoted_string_found = not triple_quoted_string_found
if triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
else:
if len(line) == len(line.lstrip()) and not triple_quoted_string_found:
newlines.append(line.split('#')[0].rstrip())
rows.append(i+1)
else:
s = line.split('#')[0].rstrip()
if len(s.strip()) > 0:
newlines[-1] += '\n' + s
for i in range(len(newlines)):
line = newlines[i]
line_list = line.split('\n')
if len(line_list) > 3:
line_str = line_list[0] + '\n' + line_list[1] + '\n' + line_list[2] + '\n...'
else:
line_str = line
try:
exec(line)
except IndentationError:
invalidCommands.append('\nIndentation error, line {}:\n{}'.format(rows[i],line_str))
except SyntaxError:
if '"""' in line and triple_quoted_string_found:
invalidCommands.append('\nUnclosed triple-quotation mark, line {}:\n{}'.format(rows[i],line_str))
else:
invalidCommands.append('\nInvalid syntax, line {}:\n{}'.format(rows[i],line_str))
except(AttributeError, NameError):
invalidCommands.append('\nUndefined variable name or attribute, line {}:\n{}'.format(rows[i],line_str))
except:
invalidCommands.append('\nUndefined error, line {}:\n{}'.format(rows[i],line_str))
if len(invalidCommands) > 0:
title = 'Fatal error'
msg = 'Error in the initialization file \'{0}\': '.format(self.init_file_name)
for line in invalidCommands:
msg = msg + '\n' + line
if self.showErrorBox:
if self.sp == None:
button = QtGui.QMessageBox.Abort
else:
button = QtGui.QMessageBox.Cancel
QtGui.QMessageBox.critical(self, title, msg, button)
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
return True
def start_spectrum(self):
init_file = self.init_file_name.split('/')[-1]
dir_ = self.init_file_name.split(init_file)[0]
if dir_ == '':
dir_ = './'
self.directory = dir_
if not self.test_init_file():
if self.sp == None:
sys.exit()
else:
return
self.sp = spectrum(config_file=self.init_file_name)
if self.sp.errorMsg:
if self.showErrorBox:
msg = 'Synthesis not possible. \n\n{}'.format(self.sp.errorMsg)
ret = QtGui.QMessageBox.critical(self, 'Critical Error', msg, QtGui.QMessageBox.Abort, QtGui.QMessageBox.Ignore)
if ret == QtGui.QMessageBox.Abort:
sys.exit()
self.sp.errorMsg = ''
if len(self.sp.read_obs_error) > 0:
title = 'Error reading observations'
msg = self.sp.read_obs_error
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ( self.sp.get_conf('fic_cosmetik') is None or
self.sp.get_conf('fic_cosmetik') == '' ):
self.sp.set_conf('do_cosmetik', False)
if self.sp.get_conf('do_synth') and self.sp.get_conf('do_cosmetik'):
self.match_cosmetic_phyat_files()
if self.sp.get_conf('clean_cosmetic_file'):
self.clean_cosmetic_file()
if self.sp.get_conf('order_cosmetic_file'):
self.order_cosmetic_file()
self.set_status_text()
self.axes = None
self.sp.ax2_fontsize = 6
self.sp_norm_box.setText('{}'.format(self.sp.get_conf('sp_norm')))
self.obj_velo_box.setText('{}'.format(self.sp.get_conf('obj_velo')))
self.ebv_box.setText('{}'.format(self.sp.get_conf('e_bv', 0)))
self.resol_box.setText('{}'.format(self.sp.get_conf('resol')))
self.cut2_box.setText('{}'.format(self.sp.get_conf('cut_plot2')))
self.magenta_box.setText('{}'.format(self.sp.plot_magenta))
self.magenta_label_box.setText('{}'.format(self.sp.label_magenta))
self.cyan_box.setText('{}'.format(self.sp.plot_cyan))
self.cyan_label_box.setText('{}'.format(self.sp.label_cyan))
self.sp_min_box.setText('{}'.format(self.sp.get_conf('limit_sp')[0]))
self.sp_max_box.setText('{}'.format(self.sp.get_conf('limit_sp')[1]))
self.init_axes()
self.xlim_min_box.setText('{}'.format(self.x_plot_lims[0]))
self.xlim_max_box.setText('{}'.format(self.x_plot_lims[1]))
self.y1lim_min_box.setText('{}'.format(self.y1_plot_lims[0]))
self.y1lim_max_box.setText('{}'.format(self.y1_plot_lims[1]))
self.y3lim_min_box.setText('{}'.format(self.y3_plot_lims[0]))
self.y3lim_max_box.setText('{}'.format(self.y3_plot_lims[1]))
self.verbosity_ag.actions()[self.sp.get_conf('log_level', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.line_tick_pos_ag.actions()[self.sp.get_conf('line_tick_pos', 0)].setChecked(True)
self.residual_GroupBox.setChecked(self.sp.get_conf('qt_plot_residuals', True))
self.selected_ions_action.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.ion_cb.setChecked(self.sp.get_conf('show_selected_ions_only', False))
self.selected_intensities_action.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.cut_cb.setChecked(self.sp.get_conf('show_selected_intensities_only', False))
self.diff_lines_ag.actions()[self.sp.get_conf('diff_lines_by', 0)].setChecked(True)
self.line_tick_ax_ag.actions()[self.sp.get_conf('line_tick_ax', 0)].setChecked(True)
self.editing_lines_action.setChecked(self.sp.get_conf('qt_allow_editing_lines', False))
self.update_lines_action.setChecked(self.sp.get_conf('qt_update_after_editing_lines', False))
self.plot_cont_action.setChecked(self.sp.get_conf('cont_plot', False))
self.show_line_ticks_action.setChecked(self.sp.get_conf('show_line_ticks', False))
self.plot_lines_action.setChecked(self.sp.get_conf('plot_lines_of_selected_ions', False))
self.lineIDs_GroupBox.setChecked(self.sp.get_conf('show_line_ticks', False) or self.sp.get_conf('plot_lines_of_selected_ions', False))
try:
selected_ions = self.sp.get_conf('selected_ions')
s = ''
for ion in selected_ions:
s = s + ion + ', '
if not s == '':
s = s[:-2]
self.ion_box.setText(s)
self.set_ion()
except:
self.ion_box.setText('')
self.line_sort_ag.actions()[self.sp.get_conf('save_lines_sort', 0)].setChecked(True)
self.show_header_action.setChecked(self.sp.get_conf('save_lines_header', False))
self.get_line_fields_to_print()
self.readOnlyCells_bg_color = QtGui.QColor('white')
self.editableCells_bg_color = QtGui.QColor('lightgreen')
if 'linux' in sys.platform and 'Plastique' in self.style_list:
default_style = 'Plastique'
elif 'darwin' in sys.platform and 'Macintosh (aqua)' in self.style_list:
default_style = 'Macintosh (aqua)'
else:
default_style = self.style_list[0]
if self.sp.get_conf('qt_style') not in self.style_list:
if 'QT_STYLE' in os.environ:
if os.environ['QT_STYLE'] in self.style_list:
self.sp.set_conf('qt_style', os.environ['QT_STYLE'])
else:
log_.warn('Unknown Qt style {}, using {}'.format(os.environ['QT_STYLE'], default_style))
self.sp.set_conf('qt_style', default_style)
else:
self.sp.set_conf('qt_style', default_style)
index_style = self.style_list.index(self.sp.get_conf('qt_style'))
self.style_ag.actions()[index_style].setChecked(True)
QtGui.qApp.setStyle(self.sp.get_conf('qt_style'))
self.enable_tooltips_action.setChecked(self.sp.get_conf('qt_enable_tooltips', True))
self.enable_tooltips_action_clicked()
self.adjust_fig_action.setChecked(self.sp.get_conf('fig_adjust', True))
def sp_norm(self):
if self.sp is None:
return
if not self.validate_sp_norm():
return
old_sp_norm = self.sp.get_conf('sp_norm')
new_sp_norm = np.float(self.sp_norm_box.text())
if old_sp_norm == new_sp_norm:
return
log_.message('Changing sp_norm. Old: {}, New: {}'.format(old_sp_norm, new_sp_norm), calling=self.calling)
self.statusBar().showMessage('Changing intensity scale of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.renorm(new_sp_norm)
self.on_draw()
def obj_velo(self):
if self.sp is None:
return
if not self.validate_obj_velo():
return
old_obj_velo = self.sp.get_conf('obj_velo')
new_obj_velo = np.float(self.obj_velo_box.text())
if old_obj_velo == new_obj_velo:
return
self.sp.iterpolate_velocity = False
self.sp.set_conf('obj_velo', new_obj_velo)
log_.message('Changing obj_velo. Old: {}, New: {}'.format(old_obj_velo, new_obj_velo), calling=self.calling)
self.statusBar().showMessage('Executing doppler correction of the observed spectrum ...')
QtGui.QApplication.processEvents()
self.sp.init_obs(obj_velo=new_obj_velo)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = True, do_profiles=False)
self.on_draw()
def ebv(self):
if self.sp is None:
return
if not self.validate_ebv():
return
old_ebv = self.sp.get_conf('e_bv')
new_ebv = np.float(self.ebv_box.text())
if old_ebv == new_ebv and not self.cont_par_changed:
return
log_.message('Changing E B-V. Old: {}, New: {}'.format(old_ebv, new_ebv), calling=self.calling)
self.statusBar().showMessage('Changing color excess E(B-V) ...', 4000)
self.statusBar().showMessage('Executing reddening correction of the synthetic spectrum ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('e_bv', new_ebv)
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = self.sp.do_synth, do_read_liste = False, do_profiles=False)
self.on_draw()
self.cont_par_changed = False
def rerun(self):
if not self.validate_synthesis_parameters():
return
if ( self.x_plot_lims[0] < np.float(self.sp_min_box.text()) or
self.x_plot_lims[1] > np.float(self.sp_max_box.text()) ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.statusBar().showMessage('Rerunning synthesis ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('limit_sp', (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text())))
self.sp.set_conf('resol', np.int(self.resol_box.text()))
self.sp.set_conf('obj_velo', np.float(self.obj_velo_box.text()))
self.sp.set_conf('sp_norm', np.float(self.sp_norm_box.text()))
self.sp.set_conf('e_bv', np.float(self.ebv_box.text()))
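# Standard resynthesis pipeline: re-read the observations, redo the reddening correction
# and the continuum, then rerun the synthesis itself (same sequence as in obj_velo/resol).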
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run()
self.set_plot_limits_and_draw()
def adjust(self):
if self.sp is None:
return
self.sp.errorMsg = ''
self.statusBar().showMessage('Running update ...')
QtGui.QApplication.processEvents()
self.sp_norm()
self.obj_velo()
self.ebv()
if self.sp.errorMsg:
if self.showErrorBox:
msg = self.sp.errorMsg
QtGui.QMessageBox.warning(self, 'Update error', msg, QtGui.QMessageBox.Ok)
return 0
ndiff, errorMsg = self.sp.adjust()
if ndiff == -1:
self.sp.do_cosmetik = False
self.sp.set_conf('do_cosmetik', False)
self.sp.fic_cosmetik
self.set_status_text()
title = 'Error in cosmetic file'
msg = 'Unable to read from file \'{}\'\nChanging to \'no cosmetic\':\n{}'.format(self.sp.get_conf('fic_cosmetik'), errorMsg)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
if ndiff > 0:
self.on_draw()
self.statusBar().showMessage('Update finished.', 4000)
return ndiff
def apply_post_proc(self):
path = str(self.post_proc_file or '')
file_choices = "Python files (*.py) (*.py);;All files (*) (*)"
title = 'Open post-process file'
path = unicode(QtGui.QFileDialog.getOpenFileName(self, title, path, file_choices))
path = path.split('/')[-1]
if not path:
return
try:
user_module = {}
execfile(path, user_module)
self.post_proc = user_module['post_proc']
self.post_proc_file = path
log_.message('function post_proc read from {}'.format(self.post_proc_file))
except:
self.post_proc = None
title = 'Error reading post-process file'
msg = 'Unable to read post-process file \'{}\''.format(path)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
return
try:
self.post_proc(self.fig)
self.canvas.draw()
except:
title = 'Error executing post-process'
msg = 'Error in post-process file \'{}\''.format(self.post_proc_file)
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok)
else:
log_.warn(msg, calling = self.calling)
def update_profile(self):
if self.sp is None:
return
self.sp.run(do_synth = True, do_read_liste = False, do_profiles=True)
self.on_draw()
def cut2(self):
if self.sp is None:
return
if not self.validate_cut():
return
self.selected_intensities_action.setChecked(True)
self.sp.set_conf('show_selected_intensities_only', True)
self.cut_cb.setChecked(True)
self.draw_ion()
def get_ion_str(self,s):
s = s.strip()
s = s.replace(' ', '_')
if s.isdigit():
line = self.sp.get_line_from_reduce_code(s)
if line is None:
s = ''
else:
s = self.sp.fieldStrFromLine(line,'id').strip()
return s
def set_ion(self):
if self.sp is None:
return
sList = []
s = self.ion_box.text()
k = s.indexOf(',')
while k >= 0:
s0 = self.get_ion_str(str(s[:k]))
if s0 != '' and s0 != '*':
sList.append(s0)
s = s[k+1:]
k = s.indexOf(',')
s0 = self.get_ion_str(str(s))
if s0 != '' and s0 != '*':
sList.append(s0)
s = ''
for s0 in sList:
s = s + s0 + ', '
s = s[:-2]
for item in sList[:]:
sList.remove(item)
if item[-1] == '*':
item = item[:-1]
this_ion_only = False
else:
this_ion_only = True
self.sp.set_ion_list()
if item.ljust(9) in self.sp.liste_raies['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
elif item.ljust(9) in self.sp.sp_theo['raie_ref']['id']:
if self.sp.true_ion(item) == item or this_ion_only:
sList = sList + [item]
if not this_ion_only:
sList = sList + self.sp.get_all_ions_from_ion(item)
else:
ion_list = self.sp.get_ions_from_element(item)
sList = sList + ion_list
self.sp.set_conf('selected_ions', sList)
self.ion_box.setText(s)
def set_refline_to_info_box(self,j):
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
if j == -1:
j = 0
s = str(self.sp.selected_ions_data[j][2][0])
self.line_info_box.setText(s)
def draw_ion(self):
if self.cut_cb.isChecked():
if self.validate_cut():
self.sp.set_conf('cut_plot2', np.float(self.cut2_box.text()))
else:
return
self.set_ion()
self.sp.set_conf('index_of_current_ion', -1)
self.sp.set_selected_ions_data()
self.set_refline_to_info_box(-1)
self.on_draw()
def line_info(self):
if self.sp is None:
return
msg = ''
s = str(self.line_info_box.text())
if s == '':
return
w = self.sp.field_width['num'] - 1
s = s[-w:]
if s[0] == '0':
s = s[1:]
self.line_info_box.setText(s)
try:
new_ref = int(s)
except ValueError:
msg = 'Invalid input.\n It is not an integer'
if msg == '':
line = self.sp.get_line_from_reduce_code(s)
if line is None:
msg = 'No line unambiguously associated with this number.'
if msg == '':
s = self.sp.fieldStrFromLine(line,'num').strip()
self.line_info_box.setText(s)
self.line_info_ref = int(s)
if self.sp.get_conf('qt_show_dialogs', True):
self.show_line_info_dialog()
else:
self.sp.line_info(new_ref, sort='i_rel')
else:
title = 'Error in line number'
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
def magenta_line(self):
if self.sp is None:
return
ref_str = self.magenta_box.text()
ref_txt = self.magenta_label_box.text()
if ref_str == '':
self.sp.plot_magenta = None
self.sp.label_magenta = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_magenta = new_ref
self.sp.label_magenta = ref_txt
self.on_draw()
def cyan_line(self):
if self.sp is None:
return
ref_str = self.cyan_box.text()
ref_txt = self.cyan_label_box.text()
if ref_str == '':
self.sp.plot_cyan = None
self.sp.label_cyan = ''
self.on_draw()
else:
new_ref = np.int(ref_str)
self.sp.plot_cyan = new_ref
self.sp.label_cyan = ref_txt
self.on_draw()
def diff_lines(self):
self.sp.set_conf('index_of_current_ion', -1)
self.set_plot_ax2()
if self.sp.get_conf('diff_lines_by') == 0 and len(self.sp.selected_ions_data) > 0:
s = str(self.sp.selected_ions_data[0][2][0])
self.line_info_box.setText(s)
def set_plot_ax2(self):
self.sp.set_selected_ions_data()
k = self.line_tick_ax_list.index(self.line_tick_ax_ag.checkedAction().text())
self.sp.set_conf('line_tick_ax',k)
k = self.line_tick_pos_list.index(self.line_tick_pos_ag.checkedAction().text())
self.sp.set_conf('line_tick_pos',k)
k = self.diff_lines_list.index(self.diff_lines_ag.checkedAction().text())
self.sp.set_conf('diff_lines_by',k)
if self.show_line_ticks_action.isChecked():
self.make_axes()
def verbosity(self):
verbosity = self.verbosity_list.index(self.verbosity_ag.checkedAction().text())
if verbosity == log_.level:
return
log_.debug('Verbosity changed from {} to {}'.format(log_.level, verbosity), calling=self.calling)
log_.level = verbosity
self.sp.set_conf('log_level', verbosity)
def style(self):
new_style_str = str(self.style_ag.checkedAction().text())
old_style_str = self.sp.get_conf('qt_style')
if new_style_str == old_style_str:
return
self.sp.set_conf('qt_style', new_style_str)
QtGui.qApp.setStyle(new_style_str)
log_.debug('Widget style changed from {} to {}'.format(old_style_str, new_style_str), calling=self.calling)
def update_lim_boxes(self):
xformat = '{:.1f}'
yformat = '{1:.{0}f}'
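# yformat takes the precision as its first argument and the value as its second, so the
# number of decimals written to the y-limit boxes can be chosen below (2 decimals when
# the plotted range is small, 1 otherwise).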
min_diff = 2
if abs(self.x_plot_lims[1] - self.x_plot_lims[0]) < min_diff:
m = (self.x_plot_lims[0] + self.x_plot_lims[1])/2
x_lims = (m - min_diff/2,m + min_diff/2)
else:
x_lims = self.x_plot_lims
min_diff = 0.2
if abs(self.y1_plot_lims[1] - self.y1_plot_lims[0]) < min_diff:
m = (self.y1_plot_lims[0] + self.y1_plot_lims[1])/2
y1_lims = (m - min_diff/2,m + min_diff/2)
else:
y1_lims = self.y1_plot_lims
min_diff = 0.2
if abs(self.y3_plot_lims[1] - self.y3_plot_lims[0]) < min_diff:
m = (self.y3_plot_lims[0] + self.y3_plot_lims[1])/2
y3_lims = (m - min_diff/2,m + min_diff/2)
else:
y3_lims = self.y3_plot_lims
if self.x_plot_lims[0] != np.float(self.xlim_min_box.text()):
self.xlim_min_box.setText(xformat.format(x_lims[0]))
if self.x_plot_lims[1] != np.float(self.xlim_max_box.text()):
self.xlim_max_box.setText(xformat.format(x_lims[1]))
delta = abs(y1_lims[1]-y1_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y1_plot_lims[0] != np.float(self.y1lim_min_box.text()):
self.y1lim_min_box.setText(yformat.format(precision, y1_lims[0]))
if self.y1_plot_lims[1] != np.float(self.y1lim_max_box.text()):
self.y1lim_max_box.setText(yformat.format(precision, y1_lims[1]))
delta = abs(y3_lims[1]-y3_lims[0])
if delta < 2:
precision = 2
else:
precision = 1
if self.y3_plot_lims[0] != np.float(self.y3lim_min_box.text()):
self.y3lim_min_box.setText(yformat.format(precision, y3_lims[0]))
if self.y3_plot_lims[1] != np.float(self.y3lim_max_box.text()):
self.y3lim_max_box.setText(yformat.format(precision, y3_lims[1]))
self.set_plot_limits_and_draw()
def validate_input(self, editBox, field, title, varType = 'float', showError = True):
value = editBox.text()
if value == None:
return False
if ( ( varType == 'float' and not self.isFloat(value) ) or \
( varType == 'integer' and not self.isInteger(value) ) or \
( varType == 'positive integer' and not self.isPositiveInteger(value) ) or \
( varType == 'positive odd integer' and not self.isPositiveOdd(value) ) ):
msg = '{} should be a {}'.format(field, varType)
msg = msg.replace('a integer', 'an integer')
editBox.setFocus()
if showError:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, title, msg, QtGui.QMessageBox.Ok )
else:
log_.warn('{}: {}'.format(title, msg), calling=self.calling)
return False
else:
return True
def validate_sp_min(self):
return self.validate_input(self.sp_min_box, 'xmin for the synthesis', 'Input error', 'float')
def validate_sp_max(self):
return self.validate_input(self.sp_max_box, 'xmax for the synthesis', 'Input error', 'float')
def validate_sp_norm(self):
return self.validate_input(self.sp_norm_box, 'normalization factor', 'Input error', 'float')
def validate_ebv(self):
return self.validate_input(self.ebv_box, 'color excess E(B-V)', 'Input error', 'float')
def validate_obj_velo(self):
return self.validate_input(self.obj_velo_box, 'radial velocity', 'Input error', 'float')
def validate_resol(self):
return self.validate_input(self.resol_box, 'rebinning factor', 'Input error', 'positive odd integer')
def validate_xlim_min(self, showError = True):
return self.validate_input(self.xlim_min_box, 'xmin', 'Invalid plot limit', 'float', showError)
def validate_xlim_max(self, showError = True):
return self.validate_input(self.xlim_max_box, 'xmax', 'Invalid plot limit', 'float', showError)
def validate_y1lim_min(self):
return self.validate_input(self.y1lim_min_box, 'ymin', 'Invalid plot limit', 'float')
def validate_y1lim_max(self):
return self.validate_input(self.y1lim_max_box, 'ymax', 'Invalid plot limit', 'float')
def validate_y3lim_min(self):
return self.validate_input(self.y3lim_min_box, 'residual ymin', 'Invalid plot limit', 'float')
def validate_y3lim_max(self):
return self.validate_input(self.y3lim_max_box, 'residual ymax', 'Invalid plot limit', 'float')
def validate_cut(self):
return self.validate_input(self.cut2_box, 'cut', 'Input error', 'float')
def sp_lim_in_range(self):
xmin = np.float(self.sp_min_box.text())
xmax = np.float(self.sp_max_box.text())
if ( xmin < xmax - 9.999 ) and ( xmin > 0. ) and ( xmax < 200000000.):
return True
else:
if self.showErrorBox:
QtGui.QMessageBox.critical(self, 'Invalid synthesis limits', 'The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.',
QtGui.QMessageBox.Ok )
else:
log_.warn('Invalid synthesis limits. The acceptable values are:\n\n xmax - xmin > 10,\n xmin > 0,\n xmax < 200000000.', calling=self.calling)
return False
def validate_synthesis_parameters(self):
return ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() and
self.validate_sp_norm() and
self.validate_obj_velo() and
self.validate_ebv() and
self.validate_resol() )
def validate_plot_parameters(self):
return ( self.validate_xlim_min() and
self.validate_xlim_max() and
self.validate_y1lim_min() and
self.validate_y1lim_max() and
self.validate_y3lim_min() and
self.validate_y3lim_max() )
def set_plot_limits_and_draw(self):
if not self.validate_plot_parameters():
return
self.x_plot_lims = (np.float(self.xlim_min_box.text()), np.float(self.xlim_max_box.text()))
self.y1_plot_lims = (np.float(self.y1lim_min_box.text()), np.float(self.y1lim_max_box.text()))
self.y3_plot_lims = (np.float(self.y3lim_min_box.text()), np.float(self.y3lim_max_box.text()))
self.sp.set_conf('x_plot_lims', self.x_plot_lims)
self.sp.set_conf('y1_plot_lims', self.y1_plot_lims)
self.sp.set_conf('y3_plot_lims', self.y3_plot_lims)
self.restore_axes()
self.draw_ion()
def set_limit_sp(self):
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
self.sp.set_conf('limit_sp', limit_sp)
def set_limit_sp_and_run(self):
if str(self.sp_min_box.text()).strip() == '':
self.sp_min_box.setText('{:.1f}'.format(self.sp.w_min))
if str(self.sp_max_box.text()).strip() == '':
self.sp_max_box.setText('{:.1f}'.format(self.sp.w_max))
if not ( self.validate_sp_min() and
self.validate_sp_max() and
self.sp_lim_in_range() ):
return
old_limit_sp = self.sp.get_conf('limit_sp')
new_limit_sp = (np.float(self.sp_min_box.text()), np.float(self.sp_max_box.text()))
if old_limit_sp == new_limit_sp:
if not self.axes_fixed:
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.set_plot_limits_and_draw()
return
if not self.validate_xlim_min(False):
self.xlim_min_box.setText(self.sp_min_box.text())
if not self.validate_xlim_max(False):
self.xlim_max_box.setText(self.sp_max_box.text())
if ( np.float(self.xlim_min_box.text()) >= new_limit_sp[1] or
np.float(self.xlim_max_box.text()) <= new_limit_sp[0] ):
self.xlim_min_box.setText(self.sp_min_box.text())
self.xlim_max_box.setText(self.sp_max_box.text())
self.sp.set_conf('limit_sp', new_limit_sp)
log_.message('Changing limit_sp. Old: {}, New: {}'.format(old_limit_sp, new_limit_sp), calling=self.calling)
self.statusBar().showMessage('Changing the synthesis wavelength limits ...')
QtGui.QApplication.processEvents()
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.set_plot_limits_and_draw()
def resol(self):
if self.sp is None:
return
if not self.validate_resol():
return
old_resol = self.sp.get_conf('resol')
new_resol = np.int(self.resol_box.text())
if old_resol == new_resol:
return
self.sp.set_conf('resol', new_resol)
log_.message('Changing resol. Old: {}, New: {}'.format(old_resol, new_resol), calling=self.calling)
self.statusBar().showMessage('Changing rebinning factor ...')
QtGui.QApplication.processEvents()
self.sp.set_conf('resol', new_resol)
self.sp.init_obs()
self.sp.init_red_corr()
self.sp.make_continuum()
self.sp.run(do_synth = True, do_read_liste = True, do_profiles=False)
self.on_draw()
def leave_fig(self, event):
self.sp.firstClick = True
if ( self.x_plot_lims != self.axes.get_xlim() or
self.y1_plot_lims != self.axes.get_ylim() or
( self.axes3 is not None and self.y3_plot_lims != self.axes3.get_ylim() ) ):
limits_changed = True
else:
limits_changed = False
if not self.axes_fixed and limits_changed:
self.save_axes()
self.update_lim_boxes()
def fix_axes(self):
if self.fix_axes_cb.isChecked():
self.axes_fixed = True
else:
self.axes_fixed = False
def get_line_fields_to_print(self):
field_list = self.sp.get_conf('save_lines_fields')
for i in range(0,len(self.line_field_menu.actions())):
if self.line_print_dic.keys()[i] in field_list:
self.line_field_menu.actions()[i].setChecked(True)
else:
self.line_field_menu.actions()[i].setChecked(False)
def set_show_header(self):
if self.show_header_action.isChecked():
self.sp.set_conf('save_lines_header', True)
else:
self.sp.set_conf('save_lines_header', False)
def set_line_fields_to_print(self):
s = []
for i in range(0,len(self.line_field_menu.actions())):
if self.line_field_menu.actions()[i].isChecked():
s.append( self.line_print_dic.keys()[i])
self.sp.set_conf('save_lines_fields', s)
def save_lines(self):
self.sp.save_lines()
path = self.sp.get_conf('save_lines_filename')
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def save_lines_as(self):
file_choices = "Text files (*.txt *.dat) (*.txt *.dat);;Tex files (*.tex) (*.tex);;CSV files (*.csv) (*.csv);;All Files (*) (*)"
filename = self.sp.get_conf('save_lines_filename')
extension = os.path.splitext(filename)[1][1:].lower()
if extension in ['txt','dat']:
selectedFilter = 'Text files (*.txt *.dat) (*.txt *.dat)'
elif extension in ['tex']:
selectedFilter = 'Tex files (*.tex) (*.tex)'
elif extension in ['csv']:
selectedFilter = 'CSV files (*.csv) (*.csv)'
else:
selectedFilter = 'All Files (*) (*)'
path = unicode(QtGui.QFileDialog.getSaveFileName(self, 'Save lines to file', filename, file_choices, selectedFilter))
if path:
self.sp.set_conf('save_lines_filename', path)
self.sp.save_lines()
self.statusBar().showMessage('Lines saved to file %s' % path, 4000)
def line_sort(self):
k = self.line_sort_list.index(self.line_sort_ag.checkedAction().text())
self.sp.set_conf('save_lines_sort',k)
def main_loc(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form.fig
def main_loc_obj(init_filename=None, post_proc_file=None):
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=init_filename, post_proc_file=post_proc_file)
form.show()
app.exec_()
return form
def main():
parser = get_parser()
args = parser.parse_args()
log_.level = args.verbosity
app = QtGui.QApplication(sys.argv)
form = AppForm(init_filename=args.file, post_proc_file=args.post_proc)
#import pdb
#pdb.set_trace()
form.show()
app.exec_()
if __name__ == "__main__":
main()
| gpl-3.0 |
opalytics/opalytics-ticdat | ticdat/pandatio.py | 1 | 27387 | # coding=utf-8
try:
import sqlite3 as sql
except:
sql = None
import json
import os
from ticdat.utils import freezable_factory, verify, case_space_to_pretty, pd, TicDatError, FrozenDict, all_fields
from ticdat.utils import all_underscore_replacements, stringish, dictish
from itertools import product
from collections import defaultdict
import inspect
_longest_sheet = 30 # seems to be an Excel limit with pandas
def _sql_con(dbFile):
verify(sql, "sqlite3 needs to be installed")
con = sql.connect(dbFile)
return con
def _brackets(l) :
return ["[%s]"%_ for _ in l]
class _DummyContextManager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self, *execinfo) :
return self
def __exit__(self, *excinfo) :
pass
class OpalyticsPanFactory(freezable_factory(object, "_isFrozen")) :
"""
Primary class for reading PanDat objects from the Opalytics Cloud Platform.
Not expected to be used outside of Opalytics Cloud hosted notebooks.
Don't create this object explicitly. An OpalyticsPanFactory will
automatically be associated with the opalytics attribute of the parent
PanDatFactory.
"""
def __init__(self, pan_dat_factory):
"""
Don't create this object explicitly. A JsonPanFactory will
automatically be associated with the json attribute of the parent
PanDatFactory.
:param pan_dat_factory:
:return:
"""
self.pan_dat_factory = pan_dat_factory
self._isFrozen = True
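# Table names are matched against inputset.schema case-insensitively, with spaces in the
# inputset names standing in for underscores (e.g. an inputset table "My Table" matches
# the PanDatFactory table my_table).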
def _find_table_matchings(self, inputset):
rtn = defaultdict(list)
for t,x in product(self.pan_dat_factory.all_tables, inputset.schema):
if stringish(x) and t.lower() == x.lower().replace(" ", "_"):
rtn[t].append(x)
return rtn
def _good_inputset(self, inputset, message_writer = lambda x : x):
if not hasattr(inputset, "schema") and dictish(inputset.schema):
message_writer("Failed to find dictish schema attribute")
return False
if not hasattr(inputset, "getTable") and callable(inputset.getTable):
message_writer("Failed to find calleable getTable attribute")
return False
table_matchings = self._find_table_matchings(inputset)
badly_matched = {t for t,v in table_matchings.items() if len(v) != 1}
if badly_matched:
message_writer("Following tables could not be uniquely resolved in inputset.schema\n%s"%
badly_matched)
return False
return True
def create_pan_dat(self, inputset, raw_data=False, freeze_it=False):
"""
Create a PanDat object from an opalytics inputset
:param inputset: An opalytics inputset consistent with this PanDatFactory
:param raw_data: boolean. should data cleaning be skipped? On the Opalytics Cloud Platform
cleaned data will be passed to instant apps. Data cleaning involves
removing data type failures, data row predicate failures, foreign key
failures, duplicated rows and deactivated records.
:return: a PanDat object populated by the tables as they are rendered by inputset
"""
message = []
verify(self._good_inputset(inputset, message.append),
"inputset is inconsistent with this PanDatFactory : %s"%(message or [None])[0])
for t in self.pan_dat_factory.all_tables:
all_fields = set(self.pan_dat_factory.primary_key_fields[t]).\
union(self.pan_dat_factory.data_fields[t])
verify("_active" not in all_fields, "Table %s has a field named '_active'.\n" +
"This conflicts with internal data processing.\n" +
" Don't use '_active' for in your PanDatFactory definition if you want to use this reader.")
tms = {k:v[0] for k,v in self._find_table_matchings(inputset).items()}
ia = {}
if "includeActive" in inspect.getargspec(inputset.getTable)[0]:
ia = {"includeActive": not raw_data}
rtn = self.pan_dat_factory.PanDat(**{t:inputset.getTable(tms[t], **ia) for t in tms})
for t in self.pan_dat_factory.all_tables:
df = getattr(rtn, t)
if "_active" in df.columns:
df = df[df["_active"]].drop('_active', axis=1)
setattr(rtn, t, df)
if not raw_data:
def removing():
removal_occured = set()
for (t,_), brs in list(self.pan_dat_factory.find_data_type_failures(rtn, as_table=False).items()) + \
list(self.pan_dat_factory.find_data_row_failures(rtn, as_table=False).items()) + \
[((t, None), brs) for t,brs in
self.pan_dat_factory.find_duplicates(rtn, as_table=False).items()]:
if t not in removal_occured:
removal_occured.add(t)
setattr(rtn, t, getattr(rtn, t)[[not _ for _ in brs]])
fkfs = self.pan_dat_factory.find_foreign_key_failures(rtn)
if fkfs:
self.pan_dat_factory.remove_foreign_key_failures(rtn)
return removal_occured or fkfs
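# Iterate to a fixed point: dropping rows for one kind of failure (data type, row
# predicate, duplicates) or removing foreign key failures can expose new failures, so
# the cleaning pass is repeated until nothing more is removed.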
while removing():
pass
return rtn
class JsonPanFactory(freezable_factory(object, "_isFrozen")):
"""
Primary class for reading/writing json data with PanDat objects.
Don't create this object explicitly. A JsonPanFactory will
automatically be associated with the json attribute of the parent
PanDatFactory.
"""
def __init__(self, pan_dat_factory):
"""
Don't create this object explicitly. A JsonPanFactory will
automatically be associated with the json attribute of the parent
PanDatFactory.
:param pan_dat_factory:
:return:
"""
self.pan_dat_factory = pan_dat_factory
to_json_args = inspect.getargspec(pd.DataFrame.to_json).args
assert "orient" in to_json_args
self._modern_pandas = "index" in to_json_args
self._isFrozen = True
def create_pan_dat(self, path_or_buf, fill_missing_fields=False, orient='split', **kwargs):
"""
Create a PanDat object from a JSON string or file
:param path_or_buf: a valid JSON string or file-like
:param fill_missing_fields: boolean. If truthy, missing fields will be filled in
with their default value. Otherwise, missing fields
throw an Exception.
:param orient: Indication of expected JSON string format. See pandas.read_json for more details.
:param kwargs: additional named arguments to pass to pandas.read_json
:return: a PanDat object populated by the matching tables.
caveats: Missing tables always throw an Exception.
Table names are matched with case-space insensitivity, but spaces
are respected for field names.
(ticdat supports whitespace in field names but not table names).
+- "inf", "-inf" strings will be converted to +-float("inf")
"""
if os.path.exists(path_or_buf):
verify(os.path.isfile(path_or_buf), "%s appears to be a directory and not a file." % path_or_buf)
with open(path_or_buf, "r") as f:
loaded_dict = json.load(f)
else:
verify(stringish(path_or_buf), "%s isn't a string" % path_or_buf)
loaded_dict = json.loads(path_or_buf)
verify(dictish(loaded_dict), "path_or_buf failed to json.load as a dict")
verify(all(map(dictish, loaded_dict.values())),
"the json.load result doesn't resolve to a dictionary whose values are themselves dictionaries")
tbl_names = self._get_table_names(loaded_dict)
verify("orient" not in kwargs, "orient should be passed as a non-kwargs argument")
rtn = {t: pd.read_json(json.dumps(loaded_dict[f]), orient=orient, **kwargs) for t,f in tbl_names.items()}
missing_fields = {(t, f) for t in rtn for f in all_fields(self.pan_dat_factory, t)
if f not in rtn[t].columns}
if fill_missing_fields:
for t,f in missing_fields:
rtn[t][f] = self.pan_dat_factory.default_values[t][f]
verify(fill_missing_fields or not missing_fields,
"The following (table, field) pairs are missing fields.\n%s" % [(t, f) for t,f in missing_fields])
for v in rtn.values():
v.replace("inf", float("inf"), inplace=True)
v.replace("-inf", -float("inf"), inplace=True)
rtn = self.pan_dat_factory.PanDat(**rtn)
msg = []
assert self.pan_dat_factory.good_pan_dat_object(rtn, msg.append), str(msg)
return rtn
def _get_table_names(self, loaded_dict):
rtn = {}
for table in self.pan_dat_factory.all_tables:
rtn[table] = [c for c in loaded_dict if c.lower().replace(" ", "_") == table.lower()]
verify(len(rtn[table]) >= 1, "Unable to recognize table %s" % table)
verify(len(rtn[table]) <= 1, "Multiple dictionary key choices found for table %s" % table)
rtn[table] = rtn[table][0]
return rtn
def write_file(self, pan_dat, json_file_path, case_space_table_names=False, orient='split',
index=False, indent=None, sort_keys=False, **kwargs):
"""
write the PanDat data to a json file (or return it as a JSON string)
:param pan_dat: the PanDat object to write
:param json_file_path: the json file into which the data is to be written. If falsey, will return a
JSON string
:param case_space_table_names: boolean - make best guesses how to add spaces and upper case
characters to table names
:param orient: Indication of expected JSON string format. See pandas.to_json for more details.
:param index: boolean - whether or not to write the index.
:param indent: None. See json.dumps
:param sort_keys: See json.dumps
:param kwargs: additional named arguments to pass to pandas.to_json
:return:
caveats: +-float("inf") will be converted to "inf", "-inf"
"""
msg = []
verify(self.pan_dat_factory.good_pan_dat_object(pan_dat, msg.append),
"pan_dat not a good object for this factory : %s"%"\n".join(msg))
verify("orient" not in kwargs, "orient should be passed as a non-kwargs argument")
verify("index" not in kwargs, "index should be passed as a non-kwargs argument")
if self._modern_pandas:
# FYI - pandas Exception: ValueError: 'index=False' is only valid when 'orient' is 'split' or 'table'
kwargs["index"] = index if orient in ("split", "table") else True
case_space_table_names = case_space_table_names and \
len(set(self.pan_dat_factory.all_tables)) == \
len(set(map(case_space_to_pretty, self.pan_dat_factory.all_tables)))
rtn = {}
for t in self.pan_dat_factory.all_tables:
df = getattr(pan_dat, t).replace(float("inf"), "inf").replace(-float("inf"), "-inf")
k = case_space_to_pretty(t) if case_space_table_names else t
rtn[k] = json.loads(df.to_json(path_or_buf=None, orient=orient, **kwargs))
if orient == 'split' and not index:
rtn[k].pop("index", None)
if json_file_path:
with open(json_file_path, "w") as f:
json.dump(rtn, f, indent=indent, sort_keys=sort_keys)
else:
return json.dumps(rtn, indent=indent, sort_keys=sort_keys)
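# Hedged usage sketch (assumes a PanDatFactory instance `pdf` and a matching PanDat `dat`;
# the .json attribute wiring is described in the class docstring above):
# json_str = pdf.json.write_file(dat, "", orient='split') # falsey path returns a string
# dat_copy = pdf.json.create_pan_dat(json_str)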
class CsvPanFactory(freezable_factory(object, "_isFrozen")):
"""
Primary class for reading/writing csv files with PanDat objects.
Don't create this object explicitly. A CsvPanFactory will
automatically be associated with the csv attribute of the parent
PanDatFactory.
"""
def __init__(self, pan_dat_factory):
"""
Don't create this object explicitly. A CsvPanFactory will
automatically be associated with the csv attribute of the parent
PanDatFactory.
:param pan_dat_factory:
:return:
"""
self.pan_dat_factory = pan_dat_factory
self._isFrozen = True
def create_pan_dat(self, dir_path, fill_missing_fields=False, **kwargs):
"""
Create a PanDat object from a directory of csv files
:param dir_path: the directory containing the .csv files.
:param fill_missing_fields: boolean. If truthy, missing fields will be filled in
with their default value. Otherwise, missing fields
throw an Exception.
:param kwargs: additional named arguments to pass to pandas.read_csv
:return: a PanDat object populated by the matching tables.
caveats: Missing tables always throw an Exception.
Table names are matched with case-space insensitivity, but spaces
are respected for field names.
(ticdat supports whitespace in field names but not table names).
"""
verify(os.path.isdir(dir_path), "%s not a directory path"%dir_path)
tbl_names = self._get_table_names(dir_path)
rtn = {t: pd.read_csv(f, **kwargs) for t,f in tbl_names.items()}
missing_fields = {(t, f) for t in rtn for f in all_fields(self.pan_dat_factory, t)
if f not in rtn[t].columns}
if fill_missing_fields:
for t,f in missing_fields:
rtn[t][f] = self.pan_dat_factory.default_values[t][f]
verify(fill_missing_fields or not missing_fields,
"The following (table, file_name, field) triplets are missing fields.\n%s" %
[(t, os.path.basename(tbl_names[t]), f) for t,f in missing_fields])
rtn = self.pan_dat_factory.PanDat(**rtn)
msg = []
assert self.pan_dat_factory.good_pan_dat_object(rtn, msg.append), str(msg)
return rtn
def _get_table_names(self, dir_path):
rtn = {}
for table in self.pan_dat_factory.all_tables:
rtn[table] = [path for f in os.listdir(dir_path) for path in [os.path.join(dir_path, f)]
if os.path.isfile(path) and
f.lower().replace(" ", "_") == "%s.csv"%table.lower()]
verify(len(rtn[table]) >= 1, "Unable to recognize table %s" % table)
verify(len(rtn[table]) <= 1, "Multiple possible csv files found for table %s" % table)
rtn[table] = rtn[table][0]
return rtn
def write_directory(self, pan_dat, dir_path, case_space_table_names=False, index=False, **kwargs):
"""
write the PanDat data to a collection of csv files
:param pan_dat: the PanDat object to write
:param dir_path: the directory in which to write the csv files
Set to falsey if using con argument.
:param case_space_table_names: boolean - make best guesses how to add spaces and upper case
characters to table names
:param index: boolean - whether or not to write the index.
:param kwargs: additional named arguments to pass to pandas.to_csv
:return:
caveats: The row names (index) isn't written (unless kwargs indicates it should be).
"""
verify(not os.path.isfile(dir_path), "A file is not a valid directory path")
msg = []
verify(self.pan_dat_factory.good_pan_dat_object(pan_dat, msg.append),
"pan_dat not a good object for this factory : %s"%"\n".join(msg))
verify("index" not in kwargs, "index should be passed as a non-kwargs argument")
kwargs["index"] = index
case_space_table_names = case_space_table_names and \
len(set(self.pan_dat_factory.all_tables)) == \
len(set(map(case_space_to_pretty, self.pan_dat_factory.all_tables)))
if not os.path.isdir(dir_path) :
os.mkdir(dir_path)
for t in self.pan_dat_factory.all_tables :
f = os.path.join(dir_path, (case_space_to_pretty(t) if case_space_table_names else t) + ".csv")
getattr(pan_dat, t).to_csv(f, **kwargs)
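# Hedged usage sketch (assumes a PanDatFactory instance `pdf` and a matching PanDat `dat`;
# the .csv attribute wiring is described in the class docstring above):
# pdf.csv.write_directory(dat, "my_output_dir")
# dat_copy = pdf.csv.create_pan_dat("my_output_dir")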
class SqlPanFactory(freezable_factory(object, "_isFrozen")):
"""
Primary class for reading/writing SQLite files
(and sqlalchemy.engine.Engine objects) with PanDat objects.
Don't create this object explicitly. A SqlPanFactory will
automatically be associated with the sql attribute of the parent
PanDatFactory.
"""
def __init__(self, pan_dat_factory):
"""
Don't create this object explicitly. A SqlPanFactory will
automatically be associated with the sql attribute of the parent
PanDatFactory.
:param pan_dat_factory:
:return:
"""
self.pan_dat_factory = pan_dat_factory
self._isFrozen = True
def create_pan_dat(self, db_file_path, con=None, fill_missing_fields=False):
"""
Create a PanDat object from a SQLite database file
:param db_file_path: A SQLite DB File. Set to falsey if using con argument
:param con: sqlalchemy.engine.Engine or sqlite3.Connection.
Set to falsey if using db_file_path argument.
:param fill_missing_fields: boolean. If truthy, missing fields will be filled in
with their default value. Otherwise, missing fields
throw an Exception.
:return: a PanDat object populated by the matching tables.
caveats: Missing tables always throw an Exception.
Table names are matched with case-space insensitivity, but spaces
are respected for field names.
(ticdat supports whitespace in field names but not table names).
"""
verify(bool(db_file_path) != bool(con),
"use either the con argument or the db_file_path argument but not both")
if db_file_path:
verify(os.path.exists(db_file_path) and not os.path.isdir(db_file_path),
"%s not a file path"%db_file_path)
rtn = {}
con_maker = lambda: _sql_con(db_file_path) if db_file_path else _DummyContextManager(con)
with con_maker() as _:
con_ = con or _
for t, s in self._get_table_names(con_).items():
rtn[t] = pd.read_sql(sql="Select * from [%s]"%s, con=con_)
missing_fields = {(t, f) for t in rtn for f in all_fields(self.pan_dat_factory, t)
if f not in rtn[t].columns}
if fill_missing_fields:
for t,f in missing_fields:
rtn[t][f] = self.pan_dat_factory.default_values[t][f]
verify(fill_missing_fields or not missing_fields,
"The following are (table, field) pairs missing from the %s file.\n%s" % (db_file_path, missing_fields))
rtn = self.pan_dat_factory.PanDat(**rtn)
msg = []
assert self.pan_dat_factory.good_pan_dat_object(rtn, msg.append), str(msg)
return rtn
def _get_table_names(self, con):
rtn = {}
def try_name(name):
try :
con.execute("Select * from [%s]"%name)
except :
return False
return True
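# Probe each candidate spelling of the table name (as produced by
# all_underscore_replacements) with a trivial SELECT; a spelling is accepted only if the
# query succeeds, and exactly one match is required.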
for table in self.pan_dat_factory.all_tables:
rtn[table] = [t for t in all_underscore_replacements(table) if try_name(t)]
verify(len(rtn[table]) >= 1, "Unable to recognize table %s" % table)
verify(len(rtn[table]) <= 1, "Multiple possible tables found for table %s" % table)
rtn[table] = rtn[table][0]
return rtn
def write_file(self, pan_dat, db_file_path, con=None, if_exists='replace', case_space_table_names=False):
"""
write the PanDat data to a SQLite database file (or connection)
:param pan_dat: the PanDat object to write
:param db_file_path: The file path of the SQLite file to create.
Set to falsey if using con argument.
:param con: sqlalchemy.engine.Engine or sqlite3.Connection.
Set to falsey if using db_file_path argument
:param if_exists: ‘fail’, ‘replace’ or ‘append’. How to behave if the table already exists
:param case_space_table_names: boolean - make best guesses how to add spaces and upper case
characters to table names
:return:
caveats: The row names (index) isn't written. The default pandas schema generation is used,
and thus foreign key relationships aren't written.
"""
# (The code to generate foreign keys is written and tested as part of TicDatFactory, and
# thus this shortcoming could be easily rectified if need be.)
# note - pandas has an unfortunate tendency to push types into SQLite columns. This can result in
# writing-reading round trips converting your numbers to text if they are mixed type columns.
verify(bool(db_file_path) != bool(con),
"use either the con argument or the db_file_path argument but not both")
msg = []
verify(self.pan_dat_factory.good_pan_dat_object(pan_dat, msg.append),
"pan_dat not a good object for this factory : %s"%"\n".join(msg))
if db_file_path:
verify(not os.path.isdir(db_file_path), "A directory is not a valid SQLite file path")
case_space_table_names = case_space_table_names and \
len(set(self.pan_dat_factory.all_tables)) == \
len(set(map(case_space_to_pretty, self.pan_dat_factory.all_tables)))
con_maker = lambda: _sql_con(db_file_path) if db_file_path else _DummyContextManager(con)
with con_maker() as _:
con_ = con or _
for t in self.pan_dat_factory.all_tables:
getattr(pan_dat, t).to_sql(name=case_space_to_pretty(t) if case_space_table_names else t,
con=con_, if_exists=if_exists, index=False)
class XlsPanFactory(freezable_factory(object, "_isFrozen")):
"""
Primary class for reading/writing Excel files with panDat objects.
Don't create this object explicitly. A XlsPanFactory will automatically be associated
with the xls attribute of the parent PanDatFactory.
"""
def __init__(self, pan_dat_factory):
"""
Don't create this object explicitly. An XlsPanFactory will
automatically be associated with the xls attribute of the parent
PanDatFactory.
:param pan_dat_factory:
:return:
"""
self.pan_dat_factory = pan_dat_factory
self._isFrozen = True
def create_pan_dat(self, xls_file_path, fill_missing_fields=False):
"""
Create a PanDat object from an Excel file
:param xls_file_path: An Excel file containing sheets whose names match
the table names in the schema.
:param fill_missing_fields: boolean. If truthy, missing fields will be filled in
with their default value. Otherwise, missing fields
throw an Exception.
:return: a PanDat object populated by the matching sheets.
caveats: Missing sheets resolve to an empty table, but missing fields
on matching sheets throw an Exception (unless fill_missing_fields is truthy).
Table names are matched to sheets with case-space insensitivity, but spaces and
case are respected for field names.
(ticdat supports whitespace in field names but not table names).
"""
rtn = {}
for t, s in self._get_sheet_names(xls_file_path).items():
rtn[t] = pd.read_excel(xls_file_path, s)
missing_tables = {t for t in self.pan_dat_factory.all_tables if t not in rtn}
if missing_tables:
print ("The following table names could not be found in the %s file.\n%s\n"%
(xls_file_path,"\n".join(missing_tables)))
missing_fields = {(t, f) for t in rtn for f in all_fields(self.pan_dat_factory, t)
if f not in rtn[t].columns}
if fill_missing_fields:
for t,f in missing_fields:
rtn[t][f] = self.pan_dat_factory.default_values[t][f]
verify(fill_missing_fields or not missing_fields,
"The following are (table, field) pairs missing from the %s file.\n%s" % (xls_file_path, missing_fields))
rtn = self.pan_dat_factory.PanDat(**rtn)
msg = []
assert self.pan_dat_factory.good_pan_dat_object(rtn, msg.append), str(msg)
return rtn
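# --- Added usage sketch (not part of the original module) ---
# With a PanDatFactory instance `pdf`, sheets whose names match the table names
# (case- and space-insensitively) are read into dataframes; the workbook name is
# hypothetical:
#     pan_dat = pdf.xls.create_pan_dat("my_data.xlsx")
#     pan_dat = pdf.xls.create_pan_dat("my_data.xlsx", fill_missing_fields=True)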
def _get_sheet_names(self, xls_file_path):
sheets = defaultdict(list)
try :
xl = pd.ExcelFile(xls_file_path)
except Exception as e:
raise TicDatError("Unable to open %s as xls file : %s"%(xls_file_path, e))
for table, sheet in product(self.pan_dat_factory.all_tables, xl.sheet_names) :
if table.lower()[:_longest_sheet] == sheet.lower().replace(' ', '_')[:_longest_sheet]:
sheets[table].append(sheet)
duplicated_sheets = tuple(_t for _t,_s in sheets.items() if len(_s) > 1)
verify(not duplicated_sheets, "The following sheet names were duplicated : " +
",".join(duplicated_sheets))
sheets = FrozenDict({k:v[0] for k,v in sheets.items()})
return sheets
def write_file(self, pan_dat, file_path, case_space_sheet_names=False):
"""
write the panDat data to an excel file
:param pan_dat: the PanDat object to write
:param file_path: The file path of the excel file to create
:param case_space_sheet_names: boolean - make best guesses how to add spaces and upper case
characters to sheet names
:return:
caveats: The row names (index) aren't written.
"""
msg = []
verify(self.pan_dat_factory.good_pan_dat_object(pan_dat, msg.append),
"pan_dat not a good object for this factory : %s"%"\n".join(msg))
verify(not os.path.isdir(file_path), "A directory is not a valid xls file path")
case_space_sheet_names = case_space_sheet_names and \
len(set(self.pan_dat_factory.all_tables)) == \
len(set(map(case_space_to_pretty, self.pan_dat_factory.all_tables)))
writer = pd.ExcelWriter(file_path)
for t in self.pan_dat_factory.all_tables:
getattr(pan_dat, t).to_excel(writer, case_space_to_pretty(t) if case_space_sheet_names else t,
index=False)
writer.save()
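# --- Added usage sketch (not part of the original module) ---
# Round-tripping through Excel; the workbook name is hypothetical:
#     pdf.xls.write_file(pan_dat, "my_data.xlsx", case_space_sheet_names=True)
#     again = pdf.xls.create_pan_dat("my_data.xlsx")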
| bsd-2-clause |
basnijholt/holoviews | holoviews/plotting/mpl/chart.py | 2 | 51054 | from __future__ import absolute_import, division, unicode_literals
from itertools import product
import param
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from matplotlib.collections import LineCollection
from matplotlib.dates import DateFormatter, date2num
from ...core.dimension import Dimension, dimension_name
from ...core.options import Store, abbreviated_exception
from ...core.util import (
OrderedDict, match_spec, unique_iterator, basestring, max_range,
isfinite, datetime_types, dt_to_int, dt64_to_dt, search_indices,
unique_array, isscalar
)
from ...element import Raster, HeatMap
from ...operation import interpolate_curve
from ...util.transform import dim
from ..plot import PlotSelector
from ..util import compute_sizes, get_sideplot_ranges, get_min_distance
from .element import ElementPlot, ColorbarPlot, LegendPlot
from .path import PathPlot
from .plot import AdjoinedPlot, mpl_rc_context
from .util import mpl_version
class ChartPlot(ElementPlot):
"""
Baseclass to plot Chart elements.
"""
class CurvePlot(ChartPlot):
"""
CurvePlot can plot Curve and ViewMaps of Curve, which can be
displayed as a single frame or animation. Axes, titles and legends
are automatically generated from dim_info.
If the dimension is set to cyclic in the dim_info it will rotate
the curve so that minimum y values are at the minimum x value to
make the plots easier to interpret.
"""
autotick = param.Boolean(default=False, doc="""
Whether to let matplotlib automatically compute tick marks
or to allow the user to control tick marks.""")
interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
'steps-pre', 'steps-post'],
default='linear', doc="""
Defines how the samples of the Curve are interpolated,
default is 'linear', other options include 'steps-mid',
'steps-pre' and 'steps-post'.""")
relative_labels = param.Boolean(default=False, doc="""
If plotted quantity is cyclic and center_cyclic is enabled,
will compute tick labels relative to the center.""")
show_grid = param.Boolean(default=False, doc="""
Enable axis grid.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_opts = ['alpha', 'color', 'visible', 'linewidth', 'linestyle', 'marker', 'ms']
_nonvectorized_styles = style_opts
_plot_methods = dict(single='plot')
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
if 'steps' in self.interpolation:
element = interpolate_curve(element, interpolation=self.interpolation)
xs = element.dimension_values(0)
ys = element.dimension_values(1)
dims = element.dimensions()
if xs.dtype.kind == 'M' or (len(xs) and isinstance(xs[0], datetime_types)):
dimtype = element.get_dimension_type(0)
dt_format = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
dims[0] = dims[0](value_format=DateFormatter(dt_format))
coords = (ys, xs) if self.invert_axes else (xs, ys)
return coords, style, {'dimensions': dims}
def init_artists(self, ax, plot_args, plot_kwargs):
xs, ys = plot_args
if xs.dtype.kind == 'M' or (len(xs) and isinstance(xs[0], datetime_types)):
artist = ax.plot_date(xs, ys, '-', **plot_kwargs)[0]
else:
artist = ax.plot(xs, ys, **plot_kwargs)[0]
return {'artist': artist}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(xs, ys), style, axis_kwargs = self.get_data(element, ranges, style)
artist.set_xdata(xs)
artist.set_ydata(ys)
return axis_kwargs
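# --- Added illustration (not part of holoviews) ---
# A minimal stand-alone sketch of what the 'steps-mid' interpolation option
# described above produces: each y value is held constant from the midpoint
# before its x sample to the midpoint after it. The real work is done by the
# interpolate_curve operation imported at the top of this module; this helper
# is only an illustration and is not used anywhere else in the file.
def _steps_mid_sketch(xs, ys):
    xs = np.asarray(xs, dtype=float)
    ys = np.asarray(ys, dtype=float)
    mids = (xs[:-1] + xs[1:]) / 2.0        # midpoints between consecutive samples
    step_x = np.empty(2 * len(xs))
    step_x[0], step_x[-1] = xs[0], xs[-1]
    step_x[1:-1:2] = mids                  # each sample spans midpoint to midpoint
    step_x[2:-1:2] = mids
    step_y = np.repeat(ys, 2)              # y held constant over each span
    return step_x, step_y                  # e.g. xs=[0, 1, 2], ys=[1, 3, 2] steps at x=0.5 and x=1.5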
class ErrorPlot(ColorbarPlot):
"""
ErrorPlot plots the ErrorBar Element type and supports
both horizontal and vertical error bars via the 'horizontal'
plot option.
"""
style_opts = ['edgecolor', 'elinewidth', 'capsize', 'capthick',
'barsabove', 'lolims', 'uplims', 'xlolims',
'errorevery', 'xuplims', 'alpha', 'linestyle',
'linewidth', 'markeredgecolor', 'markeredgewidth',
'markerfacecolor', 'markersize', 'solid_capstyle',
'solid_joinstyle', 'dashes', 'color']
_plot_methods = dict(single='errorbar')
def init_artists(self, ax, plot_data, plot_kwargs):
handles = ax.errorbar(*plot_data, **plot_kwargs)
bottoms, tops = None, None
if mpl_version >= str('2.0'):
_, caps, verts = handles
if caps:
bottoms, tops = caps
else:
_, (bottoms, tops), verts = handles
return {'bottoms': bottoms, 'tops': tops, 'verts': verts[0], 'artist': verts[0]}
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
color = style.get('color')
if isinstance(color, np.ndarray):
style['ecolor'] = color
if 'edgecolor' in style:
style['ecolor'] = style.pop('edgecolor')
c = style.get('c')
if isinstance(c, np.ndarray):
with abbreviated_exception():
raise ValueError('Mapping a continuous or categorical '
'dimension to a color on a ErrorBarPlot '
'is not supported by the {backend} backend. '
'To map a dimension to a color supply '
'an explicit list of rgba colors.'.format(
backend=self.renderer.backend
)
)
style['fmt'] = 'none'
dims = element.dimensions()
xs, ys = (element.dimension_values(i) for i in range(2))
yerr = element.array(dimensions=dims[2:4])
if self.invert_axes:
coords = (ys, xs)
err_key = 'xerr'
else:
coords = (xs, ys)
err_key = 'yerr'
style[err_key] = yerr.T if len(dims) > 3 else yerr[:, 0]
return coords, style, {}
def update_handles(self, key, axis, element, ranges, style):
bottoms = self.handles['bottoms']
tops = self.handles['tops']
verts = self.handles['verts']
_, style, axis_kwargs = self.get_data(element, ranges, style)
xs, ys, neg_error = (element.dimension_values(i) for i in range(3))
samples = len(xs)
pos_error = element.dimension_values(3) if len(element.dimensions()) > 3 else neg_error
if self.invert_axes:
bxs, bys = ys - neg_error, xs
txs, tys = ys + pos_error, xs
new_arrays = [np.array([[bxs[i], xs[i]], [txs[i], xs[i]]])
for i in range(samples)]
else:
bxs, bys = xs, ys - neg_error
txs, tys = xs, ys + pos_error
new_arrays = [np.array([[xs[i], bys[i]], [xs[i], tys[i]]])
for i in range(samples)]
verts.set_paths(new_arrays)
if bottoms:
bottoms.set_xdata(bxs)
bottoms.set_ydata(bys)
if tops:
tops.set_xdata(txs)
tops.set_ydata(tys)
if 'ecolor' in style:
verts.set_edgecolors(style['ecolor'])
if 'linewidth' in style:
verts.set_linewidths(style['linewidth'])
return axis_kwargs
class AreaPlot(ChartPlot):
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
style_opts = ['color', 'facecolor', 'alpha', 'edgecolor', 'linewidth',
'hatch', 'linestyle', 'joinstyle',
'fill', 'capstyle', 'interpolate']
_nonvectorized_styles = style_opts
_plot_methods = dict(single='fill_between')
def get_data(self, element, ranges, style):
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
xs = element.dimension_values(0)
ys = [element.dimension_values(vdim) for vdim in element.vdims]
return tuple([xs]+ys), style, {}
def init_artists(self, ax, plot_data, plot_kwargs):
fill_fn = ax.fill_betweenx if self.invert_axes else ax.fill_between
stack = fill_fn(*plot_data, **plot_kwargs)
return {'artist': stack}
def get_extents(self, element, ranges, range_type='combined'):
vdims = element.vdims[:2]
vdim = vdims[0].name
if len(vdims) > 1:
new_range = {}
for r in ranges[vdim]:
new_range[r] = max_range([ranges[vd.name][r] for vd in vdims])
ranges[vdim] = new_range
else:
s0, s1 = ranges[vdim]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[vdim]['soft'] = (s0, s1)
return super(AreaPlot, self).get_extents(element, ranges, range_type)
class SideAreaPlot(AdjoinedPlot, AreaPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare', 'left-bare' and 'right-bare'.""")
class SpreadPlot(AreaPlot):
"""
SpreadPlot plots the Spread Element type.
"""
show_legend = param.Boolean(default=False, doc="""
Whether to show legend for the plot.""")
def __init__(self, element, **params):
super(SpreadPlot, self).__init__(element, **params)
def get_data(self, element, ranges, style):
xs = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
return (xs, mean-neg_error, mean+pos_error), style, {}
def get_extents(self, element, ranges, range_type='combined'):
return ChartPlot.get_extents(self, element, ranges, range_type)
class HistogramPlot(ColorbarPlot):
"""
HistogramPlot can plot DataHistograms and ViewMaps of
DataHistograms, which can be displayed as a single frame or
animation.
"""
style_opts = ['alpha', 'color', 'align', 'visible', 'facecolor',
'edgecolor', 'log', 'capsize', 'error_kw', 'hatch',
'linewidth']
_nonvectorized_styles = ['alpha', 'log', 'error_kw', 'hatch', 'visible', 'align']
def __init__(self, histograms, **params):
self.center = False
self.cyclic = False
super(HistogramPlot, self).__init__(histograms, **params)
if self.invert_axes:
self.axis_settings = ['ylabel', 'xlabel', 'yticks']
else:
self.axis_settings = ['xlabel', 'ylabel', 'xticks']
val_dim = self.hmap.last.get_dimension(1)
self.cyclic_range = val_dim.range if val_dim.cyclic else None
@mpl_rc_context
def initialize_plot(self, ranges=None):
hist = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
el_ranges = match_spec(hist, ranges)
# Get plot ranges and values
dims = hist.dimensions()[:2]
edges, hvals, widths, lims, isdatetime = self._process_hist(hist)
if isdatetime and not dims[0].value_format:
dt_format = Dimension.type_formatters[np.datetime64]
dims[0] = dims[0](value_format=DateFormatter(dt_format))
style = self.style[self.cyclic_index]
if self.invert_axes:
self.offset_linefn = self.handles['axis'].axvline
self.plotfn = self.handles['axis'].barh
else:
self.offset_linefn = self.handles['axis'].axhline
self.plotfn = self.handles['axis'].bar
with abbreviated_exception():
style = self._apply_transforms(hist, ranges, style)
if 'vmin' in style:
raise ValueError('Mapping a continuous dimension to a '
'color on a HistogramPlot is not '
'supported by the {backend} backend. '
'To map a dimension to a color supply '
'an explicit list of rgba colors.'.format(
backend=self.renderer.backend
)
)
# Plot bars and make any adjustments
legend = hist.label if self.show_legend else ''
bars = self.plotfn(edges, hvals, widths, zorder=self.zorder, label=legend, align='edge', **style)
self.handles['artist'] = self._update_plot(self.keys[-1], hist, bars, lims, ranges) # Indexing top
ticks = self._compute_ticks(hist, edges, widths, lims)
ax_settings = self._process_axsettings(hist, lims, ticks)
ax_settings['dimensions'] = dims
return self._finalize_axis(self.keys[-1], ranges=el_ranges, element=hist, **ax_settings)
def _process_hist(self, hist):
"""
Get data from histogram, including bin_ranges and values.
"""
self.cyclic = hist.get_dimension(0).cyclic
x = hist.kdims[0]
edges = hist.interface.coords(hist, x, edges=True)
values = hist.dimension_values(1)
hist_vals = np.array(values)
xlim = hist.range(0)
ylim = hist.range(1)
isdatetime = False
if edges.dtype.kind == 'M' or isinstance(edges[0], datetime_types):
edges = np.array([dt64_to_dt(e) if isinstance(e, np.datetime64) else e for e in edges])
edges = date2num(edges)
xlim = tuple(dt_to_int(v, 'D') for v in xlim)
isdatetime = True
widths = np.diff(edges)
return edges[:-1], hist_vals, widths, xlim+ylim, isdatetime
def _compute_ticks(self, element, edges, widths, lims):
"""
Compute the ticks either as cyclic values in degrees or as roughly
evenly spaced bin centers.
"""
if self.xticks is None or not isinstance(self.xticks, int):
return None
if self.cyclic:
x0, x1, _, _ = lims
xvals = np.linspace(x0, x1, self.xticks)
labels = ["%.0f" % np.rad2deg(x) + '\N{DEGREE SIGN}' for x in xvals]
elif self.xticks:
dim = element.get_dimension(0)
inds = np.linspace(0, len(edges), self.xticks, dtype=np.int)
edges = list(edges) + [edges[-1] + widths[-1]]
xvals = [edges[i] for i in inds]
labels = [dim.pprint_value(v) for v in xvals]
return [xvals, labels]
def get_extents(self, element, ranges, range_type='combined'):
ydim = element.get_dimension(1)
s0, s1 = ranges[ydim.name]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[ydim.name]['soft'] = (s0, s1)
return super(HistogramPlot, self).get_extents(element, ranges, range_type)
def _process_axsettings(self, hist, lims, ticks):
"""
Get axis settings options including ticks, x- and y-labels
and limits.
"""
axis_settings = dict(zip(self.axis_settings, [None, None, (None if self.overlaid else ticks)]))
return axis_settings
def _update_plot(self, key, hist, bars, lims, ranges):
"""
Process the bars; can be subclassed to manually adjust bars
after they are plotted.
"""
return bars
def _update_artists(self, key, hist, edges, hvals, widths, lims, ranges):
"""
Update all the artists in the histogram. Subclassable to
allow updating of further artists.
"""
plot_vals = zip(self.handles['artist'], edges, hvals, widths)
for bar, edge, height, width in plot_vals:
if self.invert_axes:
bar.set_y(edge)
bar.set_width(height)
bar.set_height(width)
else:
bar.set_x(edge)
bar.set_height(height)
bar.set_width(width)
def update_handles(self, key, axis, element, ranges, style):
# Process values, axes and style
edges, hvals, widths, lims, _ = self._process_hist(element)
ticks = self._compute_ticks(element, edges, widths, lims)
ax_settings = self._process_axsettings(element, lims, ticks)
self._update_artists(key, element, edges, hvals, widths, lims, ranges)
return ax_settings
class SideHistogramPlot(AdjoinedPlot, HistogramPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
offset = param.Number(default=0.2, bounds=(0,1), doc="""
Histogram value offset for a colorbar.""")
show_grid = param.Boolean(default=False, doc="""
Whether to overlay a grid on the axis.""")
def _process_hist(self, hist):
"""
Subclassed to offset histogram by defined amount.
"""
edges, hvals, widths, lims, isdatetime = super(SideHistogramPlot, self)._process_hist(hist)
offset = self.offset * lims[3]
hvals *= 1-self.offset
hvals += offset
lims = lims[0:3] + (lims[3] + offset,)
return edges, hvals, widths, lims, isdatetime
def _update_artists(self, n, element, edges, hvals, widths, lims, ranges):
super(SideHistogramPlot, self)._update_artists(n, element, edges, hvals, widths, lims, ranges)
self._update_plot(n, element, self.handles['artist'], lims, ranges)
def _update_plot(self, key, element, bars, lims, ranges):
"""
Process the bars and draw the offset line as necessary. If a
color map is set in the style of the 'main' ViewableElement object, color
the bars appropriately, respecting the required normalization
settings.
"""
main = self.adjoined.main
_, y1 = element.range(1)
offset = self.offset * y1
range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
# Check if plot is colormapped
plot_type = Store.registry['matplotlib'].get(type(range_item))
if isinstance(plot_type, PlotSelector):
plot_type = plot_type.get_plot_class(range_item)
opts = self.lookup_options(range_item, 'plot')
if plot_type and issubclass(plot_type, ColorbarPlot):
cidx = opts.options.get('color_index', None)
if cidx is None:
opts = self.lookup_options(range_item, 'style')
cidx = opts.kwargs.get('color', None)
if cidx not in range_item:
cidx = None
cdim = None if cidx is None else range_item.get_dimension(cidx)
else:
cdim = None
# Get colormapping options
if isinstance(range_item, (HeatMap, Raster)) or cdim:
style = self.lookup_options(range_item, 'style')[self.cyclic_index]
cmap = cm.get_cmap(style.get('cmap'))
main_range = style.get('clims', main_range)
else:
cmap = None
if offset and ('offset_line' not in self.handles):
self.handles['offset_line'] = self.offset_linefn(offset,
linewidth=1.0,
color='k')
elif offset:
self._update_separator(offset)
if cmap is not None:
self._colorize_bars(cmap, bars, element, main_range, dim)
return bars
def _colorize_bars(self, cmap, bars, element, main_range, dim):
"""
Use the given cmap to color the bars, applying the correct
color ranges as necessary.
"""
cmap_range = main_range[1] - main_range[0]
lower_bound = main_range[0]
colors = np.array(element.dimension_values(dim))
colors = (colors - lower_bound) / (cmap_range)
for c, bar in zip(colors, bars):
bar.set_facecolor(cmap(c))
bar.set_clip_on(False)
def _update_separator(self, offset):
"""
Compute colorbar offset and update separator line
if the offset is non-zero.
"""
offset_line = self.handles['offset_line']
if offset == 0:
offset_line.set_visible(False)
else:
offset_line.set_visible(True)
if self.invert_axes:
offset_line.set_xdata(offset)
else:
offset_line.set_ydata(offset)
class PointPlot(ChartPlot, ColorbarPlot):
"""
Note that the 'cmap', 'vmin' and 'vmax' style arguments control
how point magnitudes are rendered to different colors.
"""
show_grid = param.Boolean(default=False, doc="""
Whether to draw grid lines at the tick positions.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of color style mapping, e.g. `color=dim('color')`""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of size style mapping, e.g. `size=dim('size')`""")
scaling_method = param.ObjectSelector(default="area",
objects=["width", "area"],
doc="""
Deprecated in favor of size style mapping, e.g.
size=dim('size')**2.""")
scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
Scaling factor which is applied to either the width or area
of each point, depending on the value of `scaling_method`.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'size', 'visible',
'cmap', 'vmin', 'vmax', 'norm']
_nonvectorized_styles = ['alpha', 'marker', 'cmap', 'vmin', 'vmax',
'norm', 'visible']
_disabled_opts = ['size']
_plot_methods = dict(single='scatter')
def get_data(self, element, ranges, style):
xs, ys = (element.dimension_values(i) for i in range(2))
self._compute_styles(element, ranges, style)
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
return (ys, xs) if self.invert_axes else (xs, ys), style, {}
def _compute_styles(self, element, ranges, style):
cdim = element.get_dimension(self.color_index)
color = style.pop('color', None)
cmap = style.get('cmap', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim and cmap:
cs = element.dimension_values(self.color_index)
# Check if numeric otherwise treat as categorical
if cs.dtype.kind in 'uif':
style['c'] = cs
else:
style['c'] = search_indices(cs, unique_array(cs))
self._norm_kwargs(element, ranges, style, cdim)
elif color is not None:
style['color'] = color
style['edgecolors'] = style.pop('edgecolors', style.pop('edgecolor', 'none'))
ms = style.get('s', mpl.rcParams['lines.markersize'])
sdim = element.get_dimension(self.size_index)
if sdim and ((isinstance(ms, basestring) and ms in element) or isinstance(ms, dim)):
self.param.warning(
"Cannot declare style mapping for 's' option and "
"declare a size_index; ignoring the size_index.")
sdim = None
if sdim:
sizes = element.dimension_values(self.size_index)
sizes = compute_sizes(sizes, self.size_fn, self.scaling_factor,
self.scaling_method, ms)
if sizes is None:
eltype = type(element).__name__
self.param.warning(
'%s dimension is not numeric, cannot use to '
'scale %s size.' % (sdim.pprint_label, eltype))
else:
style['s'] = sizes
style['edgecolors'] = style.pop('edgecolors', 'none')
def update_handles(self, key, axis, element, ranges, style):
paths = self.handles['artist']
(xs, ys), style, _ = self.get_data(element, ranges, style)
paths.set_offsets(np.column_stack([xs, ys]))
if 's' in style:
sizes = style['s']
if isscalar(sizes):
sizes = [sizes]
paths.set_sizes(sizes)
if 'vmin' in style:
paths.set_clim((style['vmin'], style['vmax']))
if 'c' in style:
paths.set_array(style['c'])
if 'norm' in style:
paths.norm = style['norm']
if 'linewidth' in style:
paths.set_linewidths(style['linewidth'])
if 'edgecolors' in style:
paths.set_edgecolors(style['edgecolors'])
if 'facecolors' in style:
paths.set_facecolors(style['facecolors'])
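# --- Added illustration (not part of holoviews) ---
# A rough sketch of the idea behind the scaling_method/scaling_factor parameters
# documented above; it is not the actual compute_sizes helper imported from
# ..util. matplotlib's scatter `s` argument is in area units, so a value that is
# meant to control marker *width* has to be squared before being passed on.
def _scale_point_sizes_sketch(values, scaling_factor=1, scaling_method='area', base_size=6):
    sizes = np.abs(np.asarray(values, dtype=float))   # mirrors the default size_fn=np.abs
    if scaling_method == 'width':
        sizes = sizes ** 2                            # width-proportional values -> area units
    return base_size * scaling_factor * sizes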
class VectorFieldPlot(ColorbarPlot):
"""
Renders vector fields in sheet coordinates. The vectors are
expressed in polar coordinates and may be displayed according to
angle alone (with some common, arbitrary arrow length) or may be
true polar vectors.
The color or magnitude can be mapped onto any dimension using the
color_index and size_index.
The length of the arrows is controlled by the 'scale' style
option. The scaling of the arrows may also be controlled via the
normalize_lengths and rescale_lengths plot options, which will
normalize the lengths to a maximum of 1 and scale them according
to the minimum distance respectively.
"""
arrow_heads = param.Boolean(default=True, doc="""
Whether or not to draw arrow heads. If arrowheads are enabled,
they may be customized with the 'headlength' and
'headaxislength' style options.""")
magnitude = param.ClassSelector(class_=(basestring, dim), doc="""
Dimension or dimension value transform that declares the magnitude
of each vector. Magnitude is expected to be scaled between 0-1,
by default the magnitudes are rescaled relative to the minimum
distance between vectors, this can be disabled with the
rescale_lengths option.""")
rescale_lengths = param.Boolean(default=True, doc="""
Whether the lengths will be rescaled to take into account the
smallest non-zero distance between two vectors.""")
# Deprecated parameters
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of dimension value transform on color option,
e.g. `color=dim('Magnitude')`.
""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Deprecated in favor of the magnitude option, e.g.
`magnitude=dim('Magnitude')`.
""")
normalize_lengths = param.Boolean(default=True, doc="""
Deprecated in favor of rescaling length using dimension value
transforms using the magnitude option, e.g.
`dim('Magnitude').norm()`.""")
style_opts = ['alpha', 'color', 'edgecolors', 'facecolors',
'linewidth', 'marker', 'visible', 'cmap',
'scale', 'headlength', 'headaxislength', 'pivot',
'width', 'headwidth', 'norm']
_nonvectorized_styles = ['alpha', 'marker', 'cmap', 'visible', 'norm',
'pivot', 'headlength', 'headaxislength',
'headwidth']
_plot_methods = dict(single='quiver')
def _get_magnitudes(self, element, style, ranges):
size_dim = element.get_dimension(self.size_index)
mag_dim = self.magnitude
if size_dim and mag_dim:
self.param.warning(
"Cannot declare style mapping for 'magnitude' option "
"and declare a size_index; ignoring the size_index.")
elif size_dim:
mag_dim = size_dim
elif isinstance(mag_dim, basestring):
mag_dim = element.get_dimension(mag_dim)
if mag_dim is not None:
if isinstance(mag_dim, dim):
magnitudes = mag_dim.apply(element, flat=True)
else:
magnitudes = element.dimension_values(mag_dim)
_, max_magnitude = ranges[dimension_name(mag_dim)]['combined']
if self.normalize_lengths and max_magnitude != 0:
magnitudes = magnitudes / max_magnitude
else:
magnitudes = np.ones(len(element))
return magnitudes
def get_data(self, element, ranges, style):
# Compute coordinates
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
xs = element.dimension_values(xidx) if len(element.data) else []
ys = element.dimension_values(yidx) if len(element.data) else []
# Compute vector angle and magnitude
radians = element.dimension_values(2) if len(element.data) else []
if self.invert_axes: radians = radians+1.5*np.pi
angles = list(np.rad2deg(radians))
magnitudes = self._get_magnitudes(element, style, ranges)
input_scale = style.pop('scale', 1.0)
if self.rescale_lengths:
min_dist = get_min_distance(element)
input_scale = input_scale / min_dist
args = (xs, ys, magnitudes, [0.0] * len(element))
# Compute color
cdim = element.get_dimension(self.color_index)
color = style.get('color', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim:
colors = element.dimension_values(self.color_index)
style['c'] = colors
cdim = element.get_dimension(self.color_index)
self._norm_kwargs(element, ranges, style, cdim)
style.pop('color', None)
# Process style
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
style.update(dict(scale=input_scale, angles=angles, units='x', scale_units='x'))
if 'vmin' in style:
style['clim'] = (style.pop('vmin'), style.pop('vmax'))
if 'c' in style:
style['array'] = style.pop('c')
if 'pivot' not in style:
style['pivot'] = 'mid'
if not self.arrow_heads:
style['headaxislength'] = 0
return args, style, {}
def update_handles(self, key, axis, element, ranges, style):
args, style, axis_kwargs = self.get_data(element, ranges, style)
# Set magnitudes, angles and colors if supplied.
quiver = self.handles['artist']
quiver.set_offsets(np.column_stack(args[:2]))
quiver.U = args[2]
quiver.angles = style['angles']
if 'color' in style:
quiver.set_facecolors(style['color'])
quiver.set_edgecolors(style['color'])
if 'array' in style:
quiver.set_array(style['array'])
if 'clim' in style:
quiver.set_clim(style['clim'])
if 'linewidth' in style:
quiver.set_linewidths(style['linewidth'])
return axis_kwargs
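# --- Added illustration (not part of holoviews) ---
# Conceptual sketch of the length handling described in the VectorFieldPlot
# docstring: magnitudes are normalized to a maximum of 1 and then scaled by the
# smallest distance between vectors so that arrows do not overlap. The plot code
# above achieves the same effect by dividing the quiver scale by that minimum
# distance instead of touching the magnitudes directly.
def _rescale_magnitudes_sketch(magnitudes, min_distance):
    mags = np.asarray(magnitudes, dtype=float)
    max_mag = mags.max() if len(mags) else 0.0
    if max_mag > 0:
        mags = mags / max_mag          # normalize_lengths
    return mags * min_distance         # rescale_lengths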
class BarPlot(LegendPlot):
padding = param.Number(default=0.2, doc="""
Defines the padding between groups.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
stacked = param.Boolean(default=False, doc="""
Whether the bars should be stacked or grouped.""")
xticks = param.Integer(0, precedence=-1)
# Deprecated parameters
color_by = param.List(default=['category'], doc="""
Defines how the Bar elements are colored. Valid options include
any permutation of 'group', 'category' and 'stack'.""")
group_index = param.Integer(default=0, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into groups.""")
category_index = param.Integer(default=1, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into categories.""")
stack_index = param.Integer(default=2, doc="""
Index of the dimension in the supplied Bars
Element, which will be stacked.""")
style_opts = ['alpha', 'color', 'align', 'visible', 'edgecolor',
'log', 'facecolor', 'capsize', 'error_kw', 'hatch']
_nonvectorized_styles = style_opts
legend_specs = dict(LegendPlot.legend_specs, **{
'top': dict(bbox_to_anchor=(0., 1.02, 1., .102),
ncol=3, loc=3, mode="expand", borderaxespad=0.),
'bottom': dict(ncol=3, mode="expand", loc=2,
bbox_to_anchor=(0., -0.4, 1., .102),
borderaxespad=0.1)})
_dimensions = OrderedDict([('group', 0),
('category',1),
('stack',2)])
def __init__(self, element, **params):
super(BarPlot, self).__init__(element, **params)
self.values, self.bar_dimensions = self._get_values()
def _get_values(self):
"""
Get unique index value for each bar
"""
(gi, _), (ci, _), (si, _) = self._get_dims(self.hmap.last)
ndims = self.hmap.last.ndims
dims = self.hmap.last.kdims
dimensions = []
values = {}
for vidx, vtype in zip([gi, ci, si], self._dimensions):
if vidx < ndims:
dim = dims[vidx]
dimensions.append(dim)
vals = self.hmap.dimension_values(dim.name)
else:
dimensions.append(None)
vals = [None]
values[vtype] = list(unique_iterator(vals))
return values, dimensions
def _compute_styles(self, element, style_groups):
"""
Computes color and hatch combinations for
any combination of the 'group', 'category'
and 'stack'.
"""
style = self.lookup_options(element, 'style')[0]
sopts = []
for sopt in ['color', 'hatch']:
if sopt in style:
sopts.append(sopt)
style.pop(sopt, None)
color_groups = []
for sg in style_groups:
color_groups.append(self.values[sg])
style_product = list(product(*color_groups))
wrapped_style = self.lookup_options(element, 'style').max_cycles(len(style_product))
color_groups = {k:tuple(wrapped_style[n][sopt] for sopt in sopts)
for n,k in enumerate(style_product)}
return style, color_groups, sopts
def get_extents(self, element, ranges, range_type='combined'):
ngroups = len(self.values['group'])
vdim = element.vdims[0].name
if self.stacked or self.stack_index == 1:
return 0, 0, ngroups, np.NaN
else:
vrange = ranges[vdim]['combined']
return 0, np.nanmin([vrange[0], 0]), ngroups, vrange[1]
@mpl_rc_context
def initialize_plot(self, ranges=None):
element = self.hmap.last
vdim = element.vdims[0]
axis = self.handles['axis']
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(element, ranges)
self.handles['artist'], self.handles['xticks'], xdims = self._create_bars(axis, element)
return self._finalize_axis(key, ranges=ranges, xticks=self.handles['xticks'],
element=element, dimensions=[xdims, vdim])
def _finalize_ticks(self, axis, element, xticks, yticks, zticks):
"""
Apply ticks with appropriate offsets.
"""
yalignments = None
if xticks is not None:
ticks, labels, yalignments = zip(*sorted(xticks, key=lambda x: x[0]))
xticks = (list(ticks), list(labels))
super(BarPlot, self)._finalize_ticks(axis, element, xticks, yticks, zticks)
if yalignments:
for t, y in zip(axis.get_xticklabels(), yalignments):
t.set_y(y)
def _get_dims(self, element):
ndims = len(element.dimensions())
if element.ndims < 2:
gdim, cdim, sdim = element.kdims[0], None, None
gi, ci, si = 0, ndims+1, ndims+1
elif element.ndims == 3:
gdim, cdim, sdim = element.kdims
gi, ci, si = 0, 1, 2
elif self.stacked or self.stack_index == 1:
gdim, cdim, sdim = element.kdims[0], None, element.kdims[1]
gi, ci, si = 0, ndims+1, 1
else:
gdim, cdim, sdim = element.kdims[0], element.kdims[1], None
gi, ci, si = 0, 1, ndims+1
return (gi, gdim), (ci, cdim), (si, sdim)
def _create_bars(self, axis, element):
# Get style and dimension information
values = self.values
if self.group_index != 0:
self.warning('Bars group_index plot option is deprecated '
'and will be ignored, set stacked=True/False '
'instead.')
if self.category_index != 1:
self.warning('Bars category_index plot option is deprecated '
'and will be ignored, set stacked=True/False '
'instead.')
if self.stack_index != 2 and not (self.stack_index == 1 and not self.stacked):
self.warning('Bars stack_index plot option is deprecated '
'and will be ignored, set stacked=True/False '
'instead.')
if self.color_by != ['category']:
self.warning('Bars color_by plot option is deprecated '
'and will be ignored, in future it will '
'support color style mapping by dimension.')
(gi, gdim), (ci, cdim), (si, sdim) = self._get_dims(element)
indices = dict(zip(self._dimensions, (gi, ci, si)))
color_by = ['category'] if cdim else ['stack']
style_groups = [sg for sg in color_by if indices[sg] < element.ndims]
style_opts, color_groups, sopts = self._compute_styles(element, style_groups)
dims = element.dimensions('key', label=True)
ndims = len(dims)
xdims = [d for d in [cdim, gdim] if d is not None]
# Compute widths
width = (1-(2.*self.padding)) / len(values['category'])
# Initialize variables
xticks = []
val_key = [None] * ndims
style_key = [None] * len(style_groups)
label_key = [None] * len(style_groups)
labels = []
bars = {}
# Iterate over group, category and stack dimension values
# computing xticks and drawing bars and applying styles
for gidx, grp_name in enumerate(values['group']):
if grp_name is not None:
grp = gdim.pprint_value(grp_name)
if 'group' in style_groups:
idx = style_groups.index('group')
label_key[idx] = str(grp)
style_key[idx] = grp_name
val_key[gi] = grp_name
if ci < ndims:
yalign = -0.04
else:
yalign = 0
xticks.append((gidx+0.5, grp, yalign))
for cidx, cat_name in enumerate(values['category']):
xpos = gidx+self.padding+(cidx*width)
if cat_name is not None:
cat = cdim.pprint_value(cat_name)
if 'category' in style_groups:
idx = style_groups.index('category')
label_key[idx] = str(cat)
style_key[idx] = cat_name
val_key[ci] = cat_name
xticks.append((xpos+width/2., cat, 0))
prev = 0
for stk_name in values['stack']:
if stk_name is not None:
if 'stack' in style_groups:
idx = style_groups.index('stack')
stk = sdim.pprint_value(stk_name)
label_key[idx] = str(stk)
style_key[idx] = stk_name
val_key[si] = stk_name
vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
val = float(vals[0]) if len(vals) else np.NaN
label = ', '.join(label_key)
style = dict(style_opts, label='' if label in labels else label,
**dict(zip(sopts, color_groups[tuple(style_key)])))
with abbreviated_exception():
style = self._apply_transforms(element, {}, style)
bar = axis.bar([xpos+width/2.], [val], width=width, bottom=prev,
**style)
# Update variables
bars[tuple(val_key)] = bar
prev += val if isfinite(val) else 0
labels.append(label)
title = [element.kdims[indices[cg]].pprint_label
for cg in color_by if indices[cg] < ndims]
if self.show_legend and any(len(l) for l in labels) and color_by != ['category']:
leg_spec = self.legend_specs[self.legend_position]
if self.legend_cols: leg_spec['ncol'] = self.legend_cols
axis.legend(title=', '.join(title), **leg_spec)
return bars, xticks, xdims
def update_handles(self, key, axis, element, ranges, style):
dims = element.dimensions('key', label=True)
ndims = len(dims)
(gi, _), (ci, _), (si, _) = self._get_dims(element)
val_key = [None] * ndims
for g in self.values['group']:
if g is not None: val_key[gi] = g
for c in self.values['category']:
if c is not None: val_key[ci] = c
prev = 0
for s in self.values['stack']:
if s is not None: val_key[si] = s
bar = self.handles['artist'].get(tuple(val_key))
if bar:
vals = element.sample([tuple(val_key)]).dimension_values(element.vdims[0].name)
height = float(vals[0]) if len(vals) else np.NaN
bar[0].set_height(height)
bar[0].set_y(prev)
prev += height if isfinite(height) else 0
return {'xticks': self.handles['xticks']}
class SpikesPlot(PathPlot, ColorbarPlot):
aspect = param.Parameter(default='square', doc="""
The aspect ratio mode of the plot. Allows setting an
explicit aspect ratio as width/height as well as
'square' and 'equal' options.""")
color_index = param.ClassSelector(default=None, allow_None=True,
class_=(basestring, int), doc="""
Index of the dimension from which the color will be drawn""")
spike_length = param.Number(default=0.1, doc="""
The length of each spike if the Spikes object is one-dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
style_opts = PathPlot.style_opts + ['cmap']
def init_artists(self, ax, plot_args, plot_kwargs):
if 'c' in plot_kwargs:
plot_kwargs['array'] = plot_kwargs.pop('c')
if 'vmin' in plot_kwargs and 'vmax' in plot_kwargs:
plot_kwargs['clim'] = plot_kwargs.pop('vmin'), plot_kwargs.pop('vmax')
line_segments = LineCollection(*plot_args, **plot_kwargs)
ax.add_collection(line_segments)
return {'artist': line_segments}
def get_extents(self, element, ranges, range_type='combined'):
if len(element.dimensions()) > 1:
ydim = element.get_dimension(1)
s0, s1 = ranges[ydim.name]['soft']
s0 = min(s0, 0) if isfinite(s0) else 0
s1 = max(s1, 0) if isfinite(s1) else 0
ranges[ydim.name]['soft'] = (s0, s1)
l, b, r, t = super(SpikesPlot, self).get_extents(element, ranges, range_type)
if len(element.dimensions()) == 1 and range_type != 'hard':
if self.batched:
bs, ts = [], []
# Iterate over current NdOverlay and compute extents
# from position and length plot options
frame = self.current_frame or self.hmap.last
for el in frame.values():
opts = self.lookup_options(el, 'plot').options
pos = opts.get('position', self.position)
length = opts.get('spike_length', self.spike_length)
bs.append(pos)
ts.append(pos+length)
b, t = (np.nanmin(bs), np.nanmax(ts))
else:
b, t = self.position, self.position+self.spike_length
return l, b, r, t
def get_data(self, element, ranges, style):
dimensions = element.dimensions(label=True)
ndims = len(dimensions)
pos = self.position
if ndims > 1:
data = [[(x, pos), (x, pos+y)] for x, y in element.array([0, 1])]
else:
height = self.spike_length
data = [[(x[0], pos), (x[0], pos+height)] for x in element.array([0])]
if self.invert_axes:
data = [(line[0][::-1], line[1][::-1]) for line in data]
dims = element.dimensions()
clean_spikes = []
for spike in data:
xs, ys = zip(*spike)
cols = []
for i, vs in enumerate((xs, ys)):
vs = np.array(vs)
if (vs.dtype.kind == 'M' or (len(vs) and isinstance(vs[0], datetime_types))) and i < len(dims):
dt_format = Dimension.type_formatters[np.datetime64]
dims[i] = dims[i](value_format=DateFormatter(dt_format))
vs = np.array([dt_to_int(v, 'D') for v in vs])
cols.append(vs)
clean_spikes.append(np.column_stack(cols))
cdim = element.get_dimension(self.color_index)
color = style.get('color', None)
if cdim and ((isinstance(color, basestring) and color in element) or isinstance(color, dim)):
self.param.warning(
"Cannot declare style mapping for 'color' option and "
"declare a color_index; ignoring the color_index.")
cdim = None
if cdim:
style['array'] = element.dimension_values(cdim)
self._norm_kwargs(element, ranges, style, cdim)
with abbreviated_exception():
style = self._apply_transforms(element, ranges, style)
return (clean_spikes,), style, {'dimensions': dims}
def update_handles(self, key, axis, element, ranges, style):
artist = self.handles['artist']
(data,), kwargs, axis_kwargs = self.get_data(element, ranges, style)
artist.set_paths(data)
artist.set_visible(style.get('visible', True))
if 'color' in kwargs:
artist.set_edgecolors(kwargs['color'])
if 'array' in kwargs or 'c' in kwargs:
artist.set_array(kwargs.get('array', kwargs.get('c')))
if 'vmin' in kwargs:
artist.set_clim((kwargs['vmin'], kwargs['vmax']))
if 'norm' in kwargs:
artist.norm = kwargs['norm']
if 'linewidth' in kwargs:
artist.set_linewidths(kwargs['linewidth'])
return axis_kwargs
class SideSpikesPlot(AdjoinedPlot, SpikesPlot):
bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
Make plot background invisible.""")
border_size = param.Number(default=0, doc="""
The size of the border expressed as a fraction of the main plot.""")
subplot_size = param.Number(default=0.1, doc="""
The size of subplots expressed as a fraction of the main plot.""")
spike_length = param.Number(default=1, doc="""
The length of each spike if the Spikes object is one-dimensional.""")
xaxis = param.ObjectSelector(default='bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare', 'left-bare' and 'right-bare'.""")
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/io/formats/test_to_csv.py | 12 | 7505 | from pandas import DataFrame
import numpy as np
import pandas as pd
from pandas.util import testing as tm
class TestToCSV(object):
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
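# (added note) The integer quoting values used above correspond to the csv module
# constants: 0=csv.QUOTE_MINIMAL, 1=csv.QUOTE_ALL, 2=csv.QUOTE_NONNUMERIC,
# 3=csv.QUOTE_NONE, so e.g. df.to_csv(path, quoting=csv.QUOTE_ALL) is equivalent
# to quoting=1.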
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected = ',col\n0,1\n1,2\n'
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
assert df.to_csv() == expected_default
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# GH 11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# testing if NaN values are correctly represented in the index
# GH 11553
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0.0,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n_,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0,0,2\n0,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')
})
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
expected_default_sec = (',A\n0,2013-01-01 00:00:00\n1,'
'2013-01-01 00:00:01\n2,2013-01-01 00:00:02'
'\n3,2013-01-01 00:00:03\n4,'
'2013-01-01 00:00:04\n')
assert df_sec.to_csv() == expected_default_sec
expected_ymdhms_day = (',A\n0,2013-01-01 00:00:00\n1,'
'2013-01-02 00:00:00\n2,2013-01-03 00:00:00'
'\n3,2013-01-04 00:00:00\n4,'
'2013-01-05 00:00:00\n')
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
expected_ymd_sec = (',A\n0,2013-01-01\n1,2013-01-01\n2,'
'2013-01-01\n3,2013-01-01\n4,2013-01-01\n')
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
expected_default_day = (',A\n0,2013-01-01\n1,2013-01-02\n2,'
'2013-01-03\n3,2013-01-04\n4,2013-01-05\n')
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
# testing if date_format parameter is taken into account for
# multi-indexed dataframes (GH 7791)
df_sec['B'] = 0
df_sec['C'] = 1
expected_ymd_sec = 'A,B,C\n2013-01-01,0,1\n'
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
# see gh-6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
exp = ",1\n,2\n0,1\n"
assert df.to_csv() == exp
exp = "1\n2\n1\n"
assert df.to_csv(index=False) == exp
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]),
index=pd.MultiIndex.from_arrays([[1], [2]]))
exp = ",,1\n,,2\n1,2,1\n"
assert df.to_csv() == exp
exp = "1\n2\n1\n"
assert df.to_csv(index=False) == exp
df = DataFrame(
[1], columns=pd.MultiIndex.from_arrays([['foo'], ['bar']]))
exp = ",foo\n,bar\n0,1\n"
assert df.to_csv() == exp
exp = "foo\nbar\n1\n"
assert df.to_csv(index=False) == exp
| mit |
lail3344/sms-tools | lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelFrame.py | 22 | 2847 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
pYh = np.unwrap(np.angle(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
pXr = np.unwrap(np.angle(Xr[:Ns/2]))
xrw = np.real(fftshift(ifft(Xr))) * H * 2
yhw = np.real(fftshift(ifft(Yh))) * H * 2
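# (added note) Yh is the spectrum synthesised from the detected harmonics, so
# Xr = X2 - Yh is the residual spectrum of this frame; yhw and xrw are the
# windowed time-domain reconstructions of the harmonic and residual components
# obtained with the inverse FFT.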
maxplotfreq = 8000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,2,1)
plt.plot(np.arange(M), x[pos-hM1:pos+hM2]*w, lw=1.5)
plt.axis([0, M, min(x[pos-hM1:pos+hM2]*w), max(x[pos-hM1:pos+hM2]*w)])
plt.title('x (flute-A4.wav)')
plt.subplot(3,2,3)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-90,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(3,2,5)
plt.plot(binFreq,pX,'c', lw=1.5)
plt.axis([0,maxplotfreq,0,16])
plt.plot(hfreq, hphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + harmonics')
plt.subplot(3,2,4)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.8, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.5, label='mXr')
plt.axis([0,maxplotfreq,-90,max(mYh)+2])
plt.legend(prop={'size':10})
plt.title('mYh + mXr')
plt.subplot(3,2,6)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,pYh,'c', lw=.8, label='pYh')
plt.plot(binFreq,pXr,'c', lw=1.5, label ='pXr')
plt.axis([0,maxplotfreq,-5,25])
plt.legend(prop={'size':10})
plt.title('pYh + pXr')
plt.subplot(3,2,2)
plt.plot(np.arange(Ns), yhw, 'b', lw=.8, label='yh')
plt.plot(np.arange(Ns), xrw, 'b', lw=1.5, label='xr')
plt.axis([0, Ns, min(yhw), max(yhw)])
plt.legend(prop={'size':10})
plt.title('yh + xr')
plt.tight_layout()
plt.savefig('hprModelFrame.png')
plt.show()
| agpl-3.0 |
ssh0/sotsuron_for_public | 06_model_3_4.py | 1 | 8113 | #!/usr/bin/env python
# coding: utf-8
## model 3-4:近距離の点をクラスター化するモデル
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial.distance import euclidean as euc
import collections
import operator
import random
import bisect
from itertools import chain
from scipy.optimize import leastsq
__author__ = "Shotaro Fujimoto"
def uniq_list(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def accumulate(iterable, func=operator.add):
"""Return running totals
Usage:
accumulate([1,2,3,4,5]) --> 1 3 6 10 15
accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
"""
it = iter(iterable)
total = next(it)
yield total
for element in it:
total = func(total, element)
yield total
def weighted_choice(d):
choices, weights = zip(*d)
cumdist = list(accumulate(weights))
x = random.random() * cumdist[-1]
return choices[bisect.bisect(cumdist, x)]
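# --- Added usage note ---
# weighted_choice picks a key with probability proportional to its weight, e.g.
#     weighted_choice([('a', 0.7), ('b', 0.3)])   # -> 'a' about 70% of the time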
class Person:
def __init__(self, master, id, ideas, w):
"""Initialize argmunets.
Keyword arguments:
master : Master class (call from "Meeting")
self.id : Id for each person [0, 1, ..., N-1]
self.ideas: ideas in space [0,1] × [0,1]
self.w : probability weight for the person to speak
"""
self.id = id
self.ideas = ideas
self.w = w
# add_ideas : place, tag : (x, y), [person_id, cluster_id]
master.ideas += [[(i1, i2), [self.id, 0, self.w]] for i1, i2 in self.ideas]
class Cluster:
def __init__(self, ideas, r):
"""make cluster with self.r
cluster_link:
"""
self.ideas = ideas
self.r = r
self.l = 0
self.cluster_link = []
self.clustering()
def clustering(self):
self.cell_num = int(1./self.r)
lr = 1./self.cell_num
self.cell = dict() # key: (cellx,celly), value: list of ids
self.rcell = []
for i, idea in enumerate(self.ideas):
cellx = int(idea[0][0]/lr)
celly = int(idea[0][1]/lr)
if self.cell.has_key((cellx, celly)):
self.cell[(cellx, celly)] += [i]
else:
self.cell[(cellx, celly)] = [i]
self.rcell.append((cellx, celly))
num = 1
for i in range(len(self.ideas)):
num += self.find_nearest(i, num)
return self.cluster_link
def find_nearest(self, idea_id, num):
"""find nearest idea
idea_id: index in self.ideas
"""
cx, cy = self.rcell[idea_id]
place = self.ideas[idea_id][0]
CX = uniq_list([max(0, cx - 1), cx, min(cx + 1, self.cell_num - 1)])
CY = uniq_list([max(0, cy - 1), cy, min(cy + 1, self.cell_num - 1)])
tmp = [self.cell[(i, j)] for i in CX for j in CY if self.cell.has_key((i, j))]
tmp = list(chain.from_iterable(tmp))
tmp.remove(idea_id)
if len(tmp) == 0:
self.ideas[idea_id][1][1] = num
return 1
nearest = []
cid = [num]
for k in tmp:
if euc(self.ideas[k][0], place) > self.r:
continue
nearest.append(k)
prenum = self.ideas[k][1][1]
if prenum == 0:
cid.append(num)
self.cluster_link.append((idea_id, k))
elif prenum < num:
cid.append(prenum)
if not (k, idea_id) in self.cluster_link:
self.cluster_link.append((idea_id, k))
self.l += len(nearest)
cluster_id = min(cid)
if cluster_id < num:
ans = 0
else:
ans = 1
self.ideas[idea_id][1][1] = cluster_id
for i in nearest:
self.ideas[i][1][1] = cluster_id
cid.remove(num)
if len(cid) == 0:
return ans
cid.remove(cluster_id)
if len(cid) == 0:
return ans
for i in cid:
for x in self.ideas:
if x[1][1] == i:
x[1][1] = cluster_id
return ans
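# --- Added note on the neighbour search above ---
# The unit square is divided into cells of width ~r, every idea is binned into its
# cell, and find_nearest only inspects the 3x3 block of cells around a point, so
# any idea within distance r is found without an all-pairs scan.
# Example (r = 0.25 chosen only for illustration): a point at (0.30, 0.70) lands in
# cell (1, 2) and only cells (0..2, 1..3) need to be searched.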
class Meeting:
def __init__(self, K, N, S=20, r=0.06, draw=True):
self.K = K
self.N = N
self.S = S
self.r = r
self.ideas = []
self.minutes = []
self.ave_l = 0
self.draw = draw
def gather_people(self, ideass=None, weights=None):
"""Gather participants.
Keyword arguments:
ideas : list of ideas for each person
ex) [((0.3,0.1),(0.2,0.5)), ((0.5,0.6))] when N = 2
weights: list of weights for the probability of the person to speak
"""
if not ideass:
x = np.random.rand(self.N, self.S*2)
ideass = []
for _x in x:
ideass.append([(i,j) for i,j in zip(_x[::2], _x[1::2])])
if not weights:
weights = [1.] * self.N
for i, ideas, w in zip(range(self.N), ideass, weights):
Person(self, i, ideas, w)
def init(self):
self.gather_people()
cluster = Cluster(self.ideas, self.r)
self.cluster_link = cluster.cluster_link
self.ave_l = cluster.l/float(len(self.ideas))
if self.draw:
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
self.fig = plt.figure(figsize=(9, 9))
self.ax = self.fig.add_subplot(1, 1, 1)
self.labels = []
self.s1 = []
for idea, tag in self.ideas:
x = idea[0]
y = idea[1]
s = self.ax.scatter(x, y,
c=colors[tag[0]%len(colors)],
alpha=0.2)
self.s1.append(s)
data = []
for link in self.cluster_link:
ix = self.ideas[link[0]][0][0]
iy = self.ideas[link[0]][0][1]
jx = self.ideas[link[1]][0][0]
jy = self.ideas[link[1]][0][1]
data += [(ix, jx), (iy, jy), 'k']
self.ax.plot(*data, alpha=0.5)
def progress(self):
self.init()
preidea = self.ideas[np.random.choice(range(len(self.ideas)))]
self.minutes.append(preidea)
l = list(self.ideas)
self.k = 1
while self.k < self.K + 1:
# remove ideas in the same cluster
l = [idea for idea in l if idea[1][1] != preidea[1][1]]
# if no one can speak: meeting ends.
if len(l) == 0:
break
# find the cluster id of the idea nearest to the previous one
distance = [(euc(preidea[0], i[0]), i) for i in l]
minclusterid = min(distance)[1][1][1]
# gather ideas in the cluster
tmp = [idea for idea in l if idea[1][1] == minclusterid]
d = dict()
for t in tmp:
d[t[1][0]] = d.get(t[1][0], 0) + t[1][2]
d = [(k, v) for k, v in d.items()]
# choose which person's ideas to draw from within the cluster
whois = weighted_choice(d)
# gather ideas
who = [idea for idea in tmp if idea[1][0] == whois]
p = [(idea, idea[1][2]) for idea in who]
# choose the next idea from the person whose id is "whois"
idea = weighted_choice(p)
self.minutes.append(idea)
preidea = idea
self.callback()
self.k += 1
self.after()
def callback(self):
if self.draw:
ix = self.minutes[-2][0][0]
iy = self.minutes[-2][0][1]
jx = self.minutes[-1][0][0]
jy = self.minutes[-1][0][1]
l1 = self.ax.plot([ix, jx], [iy, jy], color='b', alpha=0.5)
self.ax.text((ix+jx)/2, (iy+jy)/2, self.k)
else:
pass
def after(self):
if self.draw:
plt.show()
else:
pass
if __name__ == '__main__':
meeting = Meeting(K=20, N=4, S=20, r=0.07, draw=True)
meeting.progress()
| mit |
huazhisong/race_code | kaggle_ws/dog_cat_ws/codes/new_version/train_and_val.py | 1 | 7333 | #By @Kevin Xu
#[email protected]
#Youtube: https://www.youtube.com/channel/UCVCSn4qQXTDAtGWpWAe4Plw
#
#The aim of this project is to use TensorFlow to process our own data.
# - input_data.py: read in data and generate batches
# - model: build the model architecture
# - training: train
# I used Ubuntu with Python 3.5 and TensorFlow 1.0*; other OSes should work as well.
# With current settings, 10000 training steps took about 50 minutes on my laptop.
# data: cats vs. dogs from Kaggle
# Download link: https://www.kaggle.com/c/dogs-vs-cats-redux-kernels-edition/data
# data size: ~540M
# How to run?
# 1. run the training.py once
# 2. call the run_training() in the console to train the model.
# Note:
# it is suggested to restart your kernel to train the model multiple times
# (in order to clear all the variables in memory)
# Otherwise errors may occur: conv1/weights/biases already exist......
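# Usage sketch (illustrative, following the notes above): assuming this file is
# saved as train_and_val.py (as in the repository path), training can be
# started from an interactive session with
#
# >>> import train_and_val
# >>> train_and_val.run_training()
#
# and the kernel should be restarted before training again so that the default
# graph and its variables are cleared.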
#%%
import os
import numpy as np
import tensorflow as tf
import input_train_val_split
import model
#%%
N_CLASSES = 2
IMG_W = 208 # resize the image; if the input image is too large, training will be very slow.
IMG_H = 208
RATIO = 0.2 # take 20% of dataset as validation data
BATCH_SIZE = 64
CAPACITY = 2000
MAX_STEP = 6000 # with current parameters, it is suggested to use MAX_STEP>10k
learning_rate = 0.0001 # with current parameters, it is suggested to use learning rate<0.0001
#%%
def run_training():
# you need to change the directories to yours.
train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
logs_val_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/val/'
train, train_label, val, val_label = input_train_val_split.get_files(train_dir, RATIO)
train_batch, train_label_batch = input_train_val_split.get_batch(train,
train_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY)
val_batch, val_label_batch = input_train_val_split.get_batch(val,
val_label,
IMG_W,
IMG_H,
BATCH_SIZE,
CAPACITY)
# Build the graph on placeholders so that both the training and the
# validation batches can be fed through the same ops via feed_dict below.
x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE])
logits = model.inference(x, BATCH_SIZE, N_CLASSES)
loss = model.losses(logits, y_)
train_op = model.trainning(loss, learning_rate)
acc = model.evaluation(logits, y_)
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess= sess, coord=coord)
summary_op = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
val_writer = tf.summary.FileWriter(logs_val_dir, sess.graph)
try:
for step in np.arange(MAX_STEP):
if coord.should_stop():
break
tra_images,tra_labels = sess.run([train_batch, train_label_batch])
_, tra_loss, tra_acc = sess.run([train_op, loss, acc],
feed_dict={x:tra_images, y_:tra_labels})
if step % 50 == 0:
print('Step %d, train loss = %.2f, train accuracy = %.2f%%' %(step, tra_loss, tra_acc*100.0))
summary_str = sess.run(summary_op, feed_dict={x: tra_images, y_: tra_labels})
train_writer.add_summary(summary_str, step)
if step % 200 == 0 or (step + 1) == MAX_STEP:
val_images, val_labels = sess.run([val_batch, val_label_batch])
val_loss, val_acc = sess.run([loss, acc],
feed_dict={x:val_images, y_:val_labels})
print('** Step %d, val loss = %.2f, val accuracy = %.2f%% **' %(step, val_loss, val_acc*100.0))
summary_str = sess.run(summary_op, feed_dict={x: val_images, y_: val_labels})
val_writer.add_summary(summary_str, step)
if step % 2000 == 0 or (step + 1) == MAX_STEP:
checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
coord.request_stop()
coord.join(threads)
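# Monitoring sketch (not part of the original tutorial): the FileWriters above
# write summaries under logs_train_dir and logs_val_dir, so training can be
# followed with TensorBoard, e.g.
#
# tensorboard --logdir=/home/kevin/tensorflow/cats_vs_dogs/logs/
#
# (adjust the path to wherever the log directories live on your machine).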
#%% Evaluate one image
# when training, comment the following codes.
#from PIL import Image
#import matplotlib.pyplot as plt
#
#def get_one_image(train):
# '''Randomly pick one image from training data
# Return: ndarray
# '''
# n = len(train)
# ind = np.random.randint(0, n)
# img_dir = train[ind]
#
# image = Image.open(img_dir)
# plt.imshow(image)
# image = image.resize([208, 208])
# image = np.array(image)
# return image
#
#def evaluate_one_image():
# '''Test one image against the saved models and parameters
# '''
#
# # you need to change the directories to yours.
# train_dir = '/home/kevin/tensorflow/cats_vs_dogs/data/train/'
# train, train_label = input_data.get_files(train_dir)
# image_array = get_one_image(train)
#
# with tf.Graph().as_default():
# BATCH_SIZE = 1
# N_CLASSES = 2
#
# image = tf.cast(image_array, tf.float32)
# image = tf.image.per_image_standardization(image)
# image = tf.reshape(image, [1, 208, 208, 3])
#
# logit = model.inference(image, BATCH_SIZE, N_CLASSES)
#
# logit = tf.nn.softmax(logit)
#
# x = tf.placeholder(tf.float32, shape=[208, 208, 3])
#
# # you need to change the directories to yours.
# logs_train_dir = '/home/kevin/tensorflow/cats_vs_dogs/logs/train/'
#
# saver = tf.train.Saver()
#
# with tf.Session() as sess:
#
# print("Reading checkpoints...")
# ckpt = tf.train.get_checkpoint_state(logs_train_dir)
# if ckpt and ckpt.model_checkpoint_path:
# global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
# saver.restore(sess, ckpt.model_checkpoint_path)
# print('Loading success, global_step is %s' % global_step)
# else:
# print('No checkpoint file found')
#
# prediction = sess.run(logit, feed_dict={x: image_array})
# max_index = np.argmax(prediction)
# if max_index==0:
# print('This is a cat with possibility %.6f' %prediction[:, 0])
# else:
# print('This is a dog with possibility %.6f' %prediction[:, 1])
#%%
| gpl-3.0 |
shyamalschandra/scikit-learn | sklearn/linear_model/coordinate_descent.py | 8 | 76416 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty. For
``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
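# Illustrative sketch (not part of scikit-learn): computing an alpha grid for a
# small random problem with the private helper above.
def _example_alpha_grid():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    # 10 logarithmically spaced alphas for a pure Lasso path (l1_ratio=1),
    # returned in decreasing order.
    return _alpha_grid(X, y, l1_ratio=1.0, n_alphas=10)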
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster to implement this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
Skip input validation checks, including the Gram matrix when provided
assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already float64 Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=np.float64, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=np.float64,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
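# Illustrative sketch (not part of scikit-learn): tracing an elastic-net path
# on random data with the function above.
def _example_enet_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    y = rng.randn(50)
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    # coefs has shape (n_features, n_alphas): one column of coefficients
    # per alpha on the path.
    return alphas, coefs, dual_gaps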
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
# We expect X and y to be already float64 Fortran ordered arrays
# when bypassing checks
if check_input:
y = np.asarray(y, dtype=np.float64)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F',
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, dtype=np.float64, order='F', copy=False,
ensure_2d=False)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
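# Illustrative sketch (not part of scikit-learn): translating a desired
# penalty ``a * L1 + b * L2`` into the (alpha, l1_ratio) parametrization
# described in the ElasticNet docstring above.
def _example_elasticnet_from_l1_l2(a=0.1, b=0.3):
    alpha = a + b
    l1_ratio = a / (a + b)
    return ElasticNet(alpha=alpha, l1_ratio=l1_ratio)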
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Workaround for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
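# Illustrative sketch (not part of scikit-learn): selecting alpha by
# cross-validation with LassoCV on random data.
def _example_lasso_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    y = X[:, 0] + 0.1 * rng.randn(100)
    model = LassoCV(n_alphas=30, cv=3).fit(X, y)
    # alpha_ is the penalty retained by cross-validation and mse_path_
    # holds the per-fold test MSE along the alpha grid.
    return model.alpha_, model.mse_path_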
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets, n_features)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
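# Illustrative sketch (not part of scikit-learn): cross-validating alpha and
# l1_ratio jointly, weighting the l1_ratio grid towards the Lasso end as the
# docstring above suggests.
def _example_elasticnet_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 20)
    y = X[:, 0] - X[:, 1] + 0.1 * rng.randn(100)
    model = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1],
                         n_alphas=30, cv=3).fit(X, y)
    return model.alpha_, model.l1_ratio_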
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=np.float64, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
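# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original scikit-learn module. It echoes the Notes above:
# passing fit() an already Fortran-contiguous float64 array avoids an extra
# conversion copy inside check_array. The toy data and helper name are made up.
def _example_multitask_enet_fortran_input():
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.asfortranarray(rng.rand(20, 3))           # already Fortran-ordered
    Y = np.hstack([X[:, :1] + 0.1, X[:, :1] - 0.1])  # two related tasks
    model = MultiTaskElasticNet(alpha=0.1, l1_ratio=0.5).fit(X, Y)
    return model.coef_.shape, model.intercept_.shape  # (2, 3) and (2,)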
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
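# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original scikit-learn module. It shows the kind of l1_ratio
# grid recommended in the docstring above (values weighted towards 1) being
# searched by cross-validation. The toy data and helper name are made up.
def _example_multitask_enet_cv_l1_ratio_grid():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(30, 4)
    Y = X[:, :2] + 0.01 * rng.rand(30, 2)  # two noisy, related tasks
    cv_model = MultiTaskElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1])
    cv_model.fit(X, Y)
    return cv_model.l1_ratio_, cv_model.alpha_  # best mix and penalty strength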
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
        (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
aparai/Echo-State-Network | esn_class_check.py | 1 | 1761 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 23 17:48:03 2014
@author: parai
"""
from esn_class import esn_design, esn_prediction
import MySQLdb as mdb
import matplotlib.pyplot as plt
import numpy as np
conn = mdb.connect('localhost', 'root', 'iitkgp12', 'Ed_Kiers')
c=conn.cursor()
c.execute("SELECT * FROM week WHERE the_day=1")
data = c.fetchall()
k=[]
for ind in range(len(data))[1:]:
k.append(1 - data[ind][3]*1.0/data[ind-1][3])
data_tr=[]
out_tr = []
N = 30
l=[]
for ind in range(N):
l.append(k[ind])
for ind in range(len(k))[N+1:]:
data_tr.append(l)
out_tr.append(k[ind])
l = l[1:]
l.append(k[ind])
data_pred = []
out_pred = []
c.execute("SELECT * FROM week WHERE the_day=2")
data = c.fetchall()
k2=[]
l = []
for ind in range(len(data))[1:]:
    k2.append(1 - data[ind][3]*1.0/data[ind-1][3])
for ind in range(N):
l.append(k2[ind])
for ind in range(len(k2))[N+1:]:
data_pred.append(l)
out_pred.append(k2[ind])
l = l[1:]
l.append(k2[ind])
u =[]
y = []
l = []
for ind in range(100):
l.append(ind)
l.append(ind+1)
u.append(l)
l = []
y.append(ind+2)
esn_instance = esn_design()
esn_instance.esn_training(data_tr, out_tr)
p = esn_prediction(esn_instance)
g = []
for ind in range(len(data_pred)):
g.append(p.predict(data_pred[ind]).item((0,0)))
g_final = []
out_p = []
n = 0
for ind in range(len(g)):
if(ind==0):
g_final.append(g[ind])
out_p.append(out_pred[ind])
    else:
        g_final.append(g[ind] + g_final[-1])
        out_p.append(out_p[-1] + out_pred[ind])
    if g[ind] * 1.0 / out_pred[ind] >= 0:
        n += 1
print n*1.0/len(g)
#plt.plot(range(len(out_pred)), g, range(len(out_pred)), out_pred)
#plt.show()
| gpl-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/core/sorting.py | 3 | 12261 | """ miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long
from pandas.core.categorical import Categorical
from pandas.core.dtypes.common import (
_ensure_platform_int,
_ensure_int64,
is_categorical_dtype)
from pandas.core.dtypes.missing import isnull
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def loop(labels, shape):
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
return out
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return loop(labels, shape)
    def maybe_lift(lab, size):  # promote nan values
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(_ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
return loop(list(labels), list(shape))
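# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original pandas module. It spells out the "offsets into the
# cartesian product" idea documented above on a tiny made-up pair of label
# arrays with 2 and 3 levels respectively.
def _example_get_group_index():
    labels = [np.array([0, 0, 1, 1]), np.array([0, 2, 1, 2])]
    shape = [2, 3]
    # flat id = label0 * 3 + label1, giving array([0, 2, 4, 5])
    return get_group_index(labels, shape, sort=True, xnull=False)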
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
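# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original pandas module. It shows the round trip documented
# above: label arrays -> flat group index -> decons_group_index recovering the
# original labels, for a shape small enough to avoid the int64 overflow path.
def _example_decons_group_index_roundtrip():
    labels = [np.array([0, 1, 1]), np.array([2, 0, 1])]
    shape = [2, 3]
    comp = get_group_index(labels, shape, sort=True, xnull=False)  # [2, 3, 4]
    return decons_group_index(comp, shape)  # [array([0, 1, 1]), array([2, 0, 1])]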
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isnull(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
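# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original pandas module. It contrasts the ascending/descending
# and na_position options of nargsort described above on a made-up array
# containing NaN.
def _example_nargsort_nan_handling():
    values = np.array([3.0, np.nan, 1.0, 2.0])
    ascending_nan_last = nargsort(values, ascending=True, na_position='last')
    descending_nan_first = nargsort(values, ascending=False, na_position='first')
    # expected: array([2, 3, 0, 1]) and array([1, 0, 3, 2])
    return ascending_nan_last, descending_nan_first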
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index),
ngroups)
return _ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
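# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original pandas module. It illustrates why the sorter above
# must be stable: ties (equal group ids) keep their original relative order,
# which 'first'/'last' style groupby transforms rely on.
def _example_group_index_sorter_stability():
    group_index = np.array([1, 0, 1, 0, 1], dtype=np.int64)
    sorter = get_group_index_sorter(group_index, ngroups=2)
    # group 0 positions come first, each group in original order: [1, 3, 0, 2, 4]
    return sorter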
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = _ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
| mit |
razz0/DataMiningProject | src/observation_basket_analysis.py | 1 | 1225 | '''
Analyse observation basket
'''
import argparse
import joblib
import pandas as pd
import apriori
import helpers
from rules import RuleGenerator
parser = argparse.ArgumentParser(description='Analyse observation basket')
parser.add_argument('minsup', help='Minimum support', nargs='?', type=float, default=0.8)
args = parser.parse_args()
apriori.NUM_CORES = 1
MINSUP = args.minsup
itemsets = helpers.read_observation_basket(helpers.DATA_DIR + 'observation.basket')
all_items = list(set([item for itemset in itemsets for item in itemset]))
print(len(itemsets))
print(len(all_items))
#print(itemsets[:1])
print('\nSupport {:.3f} frequent itemsets:\n'.format(MINSUP))
freq_items = apriori.apriori(itemsets, all_items, MINSUP, verbose=True)
print(freq_items[-1])
print(len(freq_items))
joblib.dump(freq_items, helpers.DATA_DIR + 'freq_items_{:.3f}.pkl'.format(MINSUP))
ruler = RuleGenerator(itemsets, freq_items)
rules = ruler.rule_generation(0.5) #, fixed_consequents=[('varis',)])
print(len(rules))
joblib.dump(rules, helpers.DATA_DIR + 'freq_rules_{:.3f}.pkl'.format(MINSUP))
#for (rule, conf) in rules:
# print(' -> %s \t conf: {:.2f} \t supp: {:.3f}'.format(conf, ruler.support(*rule))) | mit |
pprett/scikit-learn | sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
Mogeng/IOHMM | tests/test_OLS.py | 2 | 33960 | from __future__ import print_function
from __future__ import division
# import json
from past.utils import old_div
import unittest
import numpy as np
import statsmodels.api as sm
from IOHMM import OLS
# TODO: sample weight all zero
# Corner cases
# General
# 1. sample_weight is all zero
# 2. sample_weight is all one
# 3. sample_weight is a scale of all one
# 4. sample_weight is mixed of 0 and 1
# 6. when number of data is 1/or very small, less than the number of features
# 7. standard dataset compare with sklearn/statsmodels
# 8. output dimensions
# 9. collinearty in X
# 10. to/from json
# MultivariateOLS
# 1. Y is not column/row independent
# Discrete/CrossEntropyMNL
# 1. number of class is 1
# 2. number of class is 2
class UnivariateOLSTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data_longley = sm.datasets.longley.load()
def test_ols(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.data_longley.exog, self.data_longley.endog)
# coefficient
self.assertEqual(self.model.coef.shape, (1, 7))
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355]).reshape(1, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertEqual(self.model.stderr.shape, (1, 7))
np.testing.assert_array_almost_equal(
old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
np.array([890420.383607373, 84.9149257747669, 0.03349,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212]).reshape(1, -1),
decimal=2)
# scale
self.assertEqual(self.model.dispersion.shape, (1, 1))
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array([[92936.0061673238]]),
decimal=3)
# predict
np.testing.assert_array_almost_equal(
self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
np.array([267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783]).reshape(-1, 1),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.data_longley.exog, self.data_longley.endog),
-109.61743480849013,
places=3)
# to_json
json_dict = self.model.to_json('./tests/linear_models/OLS/UnivariateOLS/')
self.assertEqual(json_dict['properties']['solver'], 'pinv')
# from_json
self.model_from_json = OLS.from_json(json_dict)
np.testing.assert_array_almost_equal(
self.model.coef,
self.model_from_json.coef,
decimal=3)
np.testing.assert_array_almost_equal(
self.model.stderr,
self.model_from_json.stderr,
decimal=3)
self.assertEqual(
self.model.dispersion,
self.model_from_json.dispersion)
def test_ols_l1_regularized(self):
# sklearn elastic net and l1 does not take sample_weights, will not test
pass
def test_ols_l2_regularized(self):
# there is a bug in sklearn with weights, it can only use list right now
self.model = OLS(
solver='auto', fit_intercept=True, est_stderr=True,
reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([-2.0172203, -52.14364269, 0.07089677, -0.42552125,
-0.57305292, -0.41272483, 48.32484052]).reshape(1, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertTrue(self.model.stderr is None)
# scale
self.assertEqual(self.model.dispersion.shape, (1, 1))
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array([[250870.081]]),
decimal=3)
# predict
np.testing.assert_array_almost_equal(
self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
np.array([[280.31871146],
[-131.6981265],
[90.64414685],
[-400.10244445],
[-440.59604167],
[-543.88595187],
[200.70483416],
[215.88629903],
[74.9456573],
[913.85128645],
[424.15996133],
[-9.5797488],
[-360.96841852],
[27.214226],
[150.87705909],
[-492.17489392]]),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.data_longley.exog, self.data_longley.endog),
-117.561627187,
places=3)
self.assertEqual(
self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
(16, ))
def test_ols_elastic_net_regularized(self):
# sklearn elastic net and l1 does not take sample_weights, will not test
pass
def test_ols_sample_weight_all_half(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.data_longley.exog, self.data_longley.endog, sample_weight=0.5)
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
np.array((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355)).reshape(1, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
np.testing.assert_array_almost_equal(
old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
np.array((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212)).reshape(1, -1),
decimal=1)
# scale
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array((92936.0061673238)))
# predict
np.testing.assert_array_almost_equal(
self.data_longley.endog.reshape(-1, 1) - self.model.predict(self.data_longley.exog),
np.array((267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783)).reshape(-1, 1),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.data_longley.exog, self.data_longley.endog),
-109.61743480849013,
places=3)
self.assertEqual(
self.model.loglike_per_sample(self.data_longley.exog, self.data_longley.endog).shape,
(16, ))
def test_ols_sample_weight_all_zero(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.assertRaises(ValueError, self.model.fit,
self.data_longley.exog, self.data_longley.endog, 0)
def test_ols_sample_weight_half_zero_half_one(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
len_half = 8
self.model.fit(self.data_longley.exog, self.data_longley.endog,
sample_weight=np.array([1] * len_half +
[0] * (self.data_longley.exog.shape[0] - len_half)))
self.model_half = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model_half.fit(self.data_longley.exog[:len_half], self.data_longley.endog[:len_half])
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
self.model_half.coef,
decimal=3)
# std.err
np.testing.assert_array_almost_equal(
self.model.stderr,
self.model_half.stderr,
decimal=3)
# scale
np.testing.assert_array_almost_equal(
self.model.dispersion,
self.model_half.dispersion,
decimal=3)
# corner cases
def test_ols_one_data_point(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.data_longley.exog[0:1, :],
self.data_longley.endog[0:1, ], sample_weight=0.5)
# coef
self.assertEqual(self.model.coef.shape, (1, 7))
# scale
np.testing.assert_array_almost_equal(self.model.dispersion, np.array([[0]]))
# loglike_per_sample
np.testing.assert_array_equal(self.model.loglike_per_sample(
self.data_longley.exog[0:1, :], self.data_longley.endog[0:1, ]), np.array([0]))
np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
np.array(self.data_longley.exog[0:1, :].tolist() * 6),
np.array([60323, 0, 60323, 60322, 60322, 60323])),
np.array([0, -np.Infinity, 0, -np.Infinity, -np.Infinity, 0]), decimal=3)
def test_ols_multicolinearty(self):
self.model_col = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
X = np.hstack([self.data_longley.exog[:, 0:1], self.data_longley.exog[:, 0:1]])
self.model_col.fit(X,
self.data_longley.endog, sample_weight=0.8)
self.model = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.data_longley.exog[:, 0:1],
self.data_longley.endog, sample_weight=0.8)
# coef
np.testing.assert_array_almost_equal(
self.model_col.coef, np.array([319.47969664, 319.47969664]).reshape(1, -1), decimal=3)
# stderr
self.assertEqual(self.model_col.stderr, None)
# scale
np.testing.assert_array_almost_equal(
self.model_col.dispersion, self.model.dispersion, decimal=3)
# loglike_per_sample
np.testing.assert_array_almost_equal(
self.model_col.loglike_per_sample(X, self.data_longley.endog),
self.model.loglike_per_sample(self.data_longley.exog[:, 0:1],
self.data_longley.endog), decimal=3)
np.testing.assert_array_almost_equal(
self.model_col.predict(X),
self.model.predict(self.data_longley.exog[:, 0:1]), decimal=3)
class IndependentMultivariateOLSTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(0)
cls.X = np.random.normal(size=(1000, 1))
cls.Y = np.random.normal(size=(cls.X.shape[0], 2))
def test_ols(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y)
# coefficient
self.assertEqual(self.model.coef.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([[-0.02924966, -0.03484827],
[-0.00978688, 0.00336316]]).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertEqual(self.model.stderr.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.stderr,
np.array([[0.03083908, 0.03121143],
[0.03002101, 0.03038348]]).reshape(2, -1),
decimal=2)
# scale
self.assertEqual(self.model.dispersion.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.dispersion,
np.array([[0.94905363, 0.0164185],
[0.0164185, 0.89937019]]),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.X, self.Y),
-2758.54387369,
places=3)
# to_json
json_dict = self.model.to_json('./tests/linear_models/OLS/MultivariateOLS/')
self.assertEqual(json_dict['properties']['solver'], 'pinv')
# from_json
self.model_from_json = OLS.from_json(json_dict)
np.testing.assert_array_almost_equal(
self.model.coef,
self.model_from_json.coef,
decimal=3)
np.testing.assert_array_almost_equal(
self.model.stderr,
self.model_from_json.stderr,
decimal=3)
np.testing.assert_array_almost_equal(
self.model.dispersion,
self.model_from_json.dispersion,
decimal=3)
def test_ols_l2_regularized(self):
self.model = OLS(
solver='auto', fit_intercept=True, est_stderr=True,
reg_method='l2', alpha=0.1, l1_ratio=1, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y)
# coefficient
self.assertEqual(self.model.coef.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([[-0.0292465, -0.03484456],
[-0.00978591, 0.00336286]]).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertTrue(self.model.stderr is None)
# scale
self.assertEqual(self.model.dispersion.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.dispersion,
np.array([[0.94905363, 0.0164185],
[0.0164185, 0.89937019]]),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.X, self.Y),
-2758.5438737,
places=3)
def test_ols_l1_regularized(self):
# sklearn l1 and elstic net does not support sample weight
pass
def test_ols_sample_weight_all_half(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y, sample_weight=0.5)
# coefficient
self.assertEqual(self.model.coef.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([[-0.02924966, -0.03484827],
[-0.00978688, 0.00336316]]).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertEqual(self.model.stderr.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.stderr,
np.array([[0.03083908, 0.03121143],
[0.03002101, 0.03038348]]).reshape(2, -1),
decimal=2)
# scale
self.assertEqual(self.model.dispersion.shape, (2, 2))
np.testing.assert_array_almost_equal(
self.model.dispersion,
np.array([[0.94905363, 0.0164185],
[0.0164185, 0.89937019]]),
decimal=3)
# loglike/_per_sample
self.assertAlmostEqual(
self.model.loglike(self.X, self.Y, 0.5),
old_div(-2758.54387369, 2.),
places=3)
self.assertEqual(
self.model.loglike_per_sample(self.X, self.Y).shape,
(1000, ))
def test_ols_sample_weight_all_zero(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)
def test_ols_sample_weight_half_zero_half_one(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
len_half = 500
self.model.fit(self.X, self.Y,
sample_weight=np.array([1] * len_half +
[0] * (self.X.shape[0] - len_half)))
self.model_half = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model_half.fit(self.X[:len_half], self.Y[:len_half])
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
self.model_half.coef,
decimal=3)
# std.err
np.testing.assert_array_almost_equal(
self.model.stderr,
self.model_half.stderr,
decimal=3)
# scale
np.testing.assert_array_almost_equal(
self.model.dispersion,
self.model_half.dispersion,
decimal=3)
# corner cases
def test_ols_one_data_point(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X[0:1, :],
self.Y[0:1, ], sample_weight=0.5)
# coef
self.assertEqual(self.model.coef.shape, (2, 2))
# scale
np.testing.assert_array_almost_equal(
self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
# loglike_per_sample
np.testing.assert_array_equal(self.model.loglike_per_sample(
self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
np.array(self.X[0:1, :].tolist() * 6),
np.array([self.Y[0, ], self.Y[1, ], self.Y[0, ],
self.Y[1, ], self.Y[1, ], self.Y[0, ]])),
np.array([0, -np.Infinity, 0, -np.Infinity, -np.Infinity, 0]), decimal=3)
def test_ols_multicolinearty(self):
self.model_col = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
self.model_col.fit(X,
self.Y, sample_weight=0.5)
self.model = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X[:, 0:1],
self.Y, sample_weight=0.5)
# stderr
self.assertEqual(self.model_col.stderr, None)
# scale
np.testing.assert_array_almost_equal(
self.model_col.dispersion, self.model.dispersion, decimal=3)
# loglike_per_sample
np.testing.assert_array_almost_equal(
self.model_col.loglike_per_sample(X, self.Y),
self.model.loglike_per_sample(self.X[:, 0:1],
self.Y), decimal=0)
np.testing.assert_array_almost_equal(
self.model_col.predict(X),
self.model.predict(self.X[:, 0:1]), decimal=1)
class PerfectCorrelationMultivariateOLSTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
np.random.seed(0)
cls.data_longley = sm.datasets.longley.load()
cls.X = cls.data_longley.exog
cls.Y = np.hstack((cls.data_longley.endog.reshape(-1, 1),
cls.data_longley.endog.reshape(-1, 1)))
def test_ols(self):
self.model = OLS(
solver='auto', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y)
# coefficient
self.assertEqual(self.model.coef.shape, (2, 7))
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([[-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355],
[-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355]]).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertEqual(self.model.stderr.shape, (2, 7))
np.testing.assert_array_almost_equal(
old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
np.array([[890420.383607373, 84.9149257747669, 0.03349,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212],
[890420.383607373, 84.9149257747669, 0.03349,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212]]).reshape(2, -1),
decimal=2)
# scale
self.assertEqual(self.model.dispersion.shape, (2, 2))
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array([[92936.0061673238, 92936.0061673238],
[92936.0061673238, 92936.0061673238]]),
decimal=3)
# predict
np.testing.assert_array_almost_equal(
self.Y - self.model.predict(self.X),
np.hstack((np.array([267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783]).reshape(-1, 1),
np.array([267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783]).reshape(-1, 1))),
decimal=3)
# loglike/_per_sample
self.assertRaises(ValueError,
self.model.loglike_per_sample, self.X, self.Y)
def test_ols_l1_regularized(self):
# sklearn elastic net and l1 does not take sample_weights, will not test
pass
def test_ols_l2_regularized(self):
# there is a bug in sklearn with weights, it can only use list right now
self.model = OLS(
solver='auto', fit_intercept=True, est_stderr=True,
reg_method='l2', alpha=0.1, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y, sample_weight=0.5)
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
np.array([[-2.0172203, -52.14364269, 0.07089677, -0.42552125,
-0.57305292, -0.41272483, 48.32484052],
[-2.0172203, -52.14364269, 0.07089677, -0.42552125,
-0.57305292, -0.41272483, 48.32484052]]).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
self.assertTrue(self.model.stderr is None)
# scale
self.assertEqual(self.model.dispersion.shape, (2, 2))
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array([[250870.081, 250870.081],
[250870.081, 250870.081]]),
decimal=3)
# predict
res = np.array([[280.31871146],
[-131.6981265],
[90.64414685],
[-400.10244445],
[-440.59604167],
[-543.88595187],
[200.70483416],
[215.88629903],
[74.9456573],
[913.85128645],
[424.15996133],
[-9.5797488],
[-360.96841852],
[27.214226],
[150.87705909],
[-492.17489392]])
np.testing.assert_array_almost_equal(
self.Y - self.model.predict(self.X),
np.hstack((res, res)),
decimal=3)
# loglike/_per_sample
self.assertRaises(ValueError,
self.model.loglike, self.X, self.Y)
def test_ols_elastic_net_regularized(self):
# sklearn elastic net and l1 does not take sample_weights, will not test
pass
def test_ols_sample_weight_all_half(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X, self.Y, sample_weight=0.5)
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
np.array(((-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355),
(-3482258.63459582, 15.0618722713733, -0.358191792925910E-01,
-2.02022980381683, -1.03322686717359, -0.511041056535807E-01,
1829.15146461355))).reshape(2, -1),
decimal=3)
# std.err of coefficient (calibrated by df_resid)
np.testing.assert_array_almost_equal(
old_div(self.model.stderr, np.sqrt(old_div(9., self.data_longley.exog.shape[0]))),
np.array(((890420.383607373, 84.9149257747669, 0.334910077722432E-01,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212),
(890420.383607373, 84.9149257747669, 0.334910077722432E-01,
0.488399681651699, 0.214274163161675, 0.226073200069370,
455.478499142212))).reshape(2, -1),
decimal=1)
# scale
np.testing.assert_array_almost_equal(
old_div(self.model.dispersion, (old_div(9., self.data_longley.exog.shape[0]))),
np.array(((92936.0061673238, 92936.0061673238),
(92936.0061673238, 92936.0061673238))),
decimal=3)
# predict
res = np.array((267.34003, -94.01394, 46.28717, -410.11462,
309.71459, -249.31122, -164.04896, -13.18036, 14.30477, 455.39409,
-17.26893, -39.05504, -155.54997, -85.67131, 341.93151,
-206.75783)).reshape(-1, 1)
np.testing.assert_array_almost_equal(
self.Y - self.model.predict(self.X),
np.hstack((res, res)),
decimal=3)
# loglike/_per_sample
self.assertRaises(ValueError,
self.model.loglike, self.X, self.Y)
def test_ols_sample_weight_all_zero(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.assertRaises(ValueError, self.model.fit, self.X, self.Y, 0)
def test_ols_sample_weight_half_zero_half_one(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
len_half = 8
self.model.fit(self.X, self.Y,
sample_weight=np.array([1] * len_half +
[0] * (self.data_longley.exog.shape[0] - len_half)))
self.model_half = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model_half.fit(self.X[:len_half], self.Y[:len_half])
# coefficient
np.testing.assert_array_almost_equal(
self.model.coef,
self.model_half.coef,
decimal=3)
# std.err
np.testing.assert_array_almost_equal(
self.model.stderr,
self.model_half.stderr,
decimal=3)
# scale
np.testing.assert_array_almost_equal(
self.model.dispersion,
self.model_half.dispersion,
decimal=3)
# corner cases
def test_ols_one_data_point(self):
self.model = OLS(
solver='pinv', fit_intercept=True, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X[0:1, :],
self.Y[0:1, ], sample_weight=0.5)
# coef
self.assertEqual(self.model.coef.shape, (2, 7))
# scale
np.testing.assert_array_almost_equal(
self.model.dispersion, np.array([[0, 0], [0, 0]]), decimal=6)
# loglike_per_sample
np.testing.assert_array_equal(self.model.loglike_per_sample(
self.X[0:1, :], self.Y[0:1, ]), np.array([0]))
np.testing.assert_array_almost_equal(self.model.loglike_per_sample(
np.array(self.X[0:1, :].tolist() * 6),
np.array([[60323, 60323], [0, 60323], [60323, 60323],
[60322, 60323], [60322, 60322], [60323, 60323]])),
np.array([0, -np.Infinity, 0, -np.Infinity, -np.Infinity, 0]), decimal=3)
def test_ols_multicolinearty(self):
self.model_col = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
X = np.hstack([self.X[:, 0:1], self.X[:, 0:1]])
self.model_col.fit(X,
self.Y, sample_weight=0.8)
self.model = OLS(
solver='pinv', fit_intercept=False, est_stderr=True,
reg_method=None, alpha=0, l1_ratio=0, tol=1e-4, max_iter=100,
coef=None, stderr=None, dispersion=None)
self.model.fit(self.X[:, 0:1],
self.Y, sample_weight=0.8)
# coef
np.testing.assert_array_almost_equal(
self.model_col.coef, np.array([[319.47969664, 319.47969664],
[319.47969664, 319.47969664]]).reshape(2, -1), decimal=3)
# stderr
self.assertEqual(self.model_col.stderr, None)
# scale
np.testing.assert_array_almost_equal(
self.model_col.dispersion, self.model.dispersion, decimal=3)
# loglike_per_sample
self.assertRaises(ValueError,
self.model_col.loglike, X, self.Y)
np.testing.assert_array_almost_equal(
self.model_col.predict(X),
self.model.predict(self.X[:, 0:1]), decimal=3)
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/utils/validation.py | 30 | 24618 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
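# Editor's note: the sketch below is an illustrative addition for this document,
# not part of the original scikit-learn module. It exercises the finiteness
# check described above: a clean array passes silently, while an array holding
# np.inf raises ValueError.
def _example_assert_all_finite():
    assert_all_finite(np.array([0.0, 1.0, 2.0]))  # passes silently
    try:
        assert_all_finite(np.array([0.0, np.inf]))
    except ValueError as exc:
        return str(exc)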
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
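# Usage sketch for as_float_array (toy values only):
# >>> as_float_array([0, 1, 2]).dtype                  # lists are converted via check_array
# dtype('float64')
# >>> x = np.array([1.0, 2.0])
# >>> as_float_array(x, copy=False) is x               # already float and no copy requested
# True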
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type in the list is
only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will"
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
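# Usage sketch for check_array (toy values only):
# >>> check_array([[1.0, 2.0], [3.0, 4.0]]).shape
# (2, 2)
# >>> check_array(np.arange(3.0))                       # 1d input warns, returns shape (1, 3)
# >>> check_array([[1.0, 2.0]], ensure_min_samples=2)   # raises ValueError: only 1 sample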
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type in the list is
only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
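# Usage sketch for check_X_y (toy values only):
# >>> X, y = check_X_y([[1.0], [2.0], [3.0]], [0, 1, 0])
# >>> X.shape, y.shape
# ((3, 1), (3,))
# >>> check_X_y([[1.0], [2.0]], [0, 1, 0])              # raises ValueError: inconsistent lengths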
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
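# Usage sketch for check_random_state (toy values only):
# >>> rng = check_random_state(0)                       # int seed -> new RandomState
# >>> check_random_state(rng) is rng                    # an existing RandomState passes through
# True
# >>> check_random_state(None) is np.random.mtrand._rand
# True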
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
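# Usage sketch for check_symmetric (toy values only):
# >>> a = np.array([[0.0, 2.0], [0.0, 0.0]])
# >>> check_symmetric(a)        # warns and returns 0.5 * (a + a.T), i.e. [[0., 1.], [1., 0.]]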
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
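# Usage sketch for check_is_fitted (hypothetical Dummy estimator, toy values only):
# >>> class Dummy(object):
# ...     def fit(self):
# ...         self.coef_ = 1.0
# ...         return self
# >>> check_is_fitted(Dummy(), "coef_")                 # raises NotFittedError
# >>> check_is_fitted(Dummy().fit(), "coef_")           # passes silently once fitted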
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
bardin-lab/readtagger | readtagger/cli/plot_coverage.py | 1 | 1420 | import click
from matplotlib.style import available
from readtagger.plot_coverage import plot_coverage_in_regions
from readtagger import VERSION
@click.command('Plot relative coverage for alignment files.')
@click.option('-f',
'--file',
type=(str, str, int),
multiple=True,
help="File, label and number of total reads in file.")
@click.argument('output_path')
@click.option('-c',
'--cores',
help='Cores to use when calculating coverage',
default=1)
@click.option('-r',
'--regions',
help='Regions to plot. If not specified plots all contigs.')
@click.option('-k',
'--plot_kind',
default='area',
type=click.Choice(['area', 'line']),
help='Kind of plot.')
@click.option('-s',
'--style',
type=click.Choice(available),
default='ggplot')
@click.version_option(version=VERSION)
def plot_coverage(**kwargs):
"""Plot coverage differences between file1 and file2."""
file_tuples = kwargs.pop('file')
kwargs['files'] = [_[0] for _ in file_tuples]
kwargs['labels'] = [_[1] for _ in file_tuples]
kwargs['total_reads'] = [_[2] for _ in file_tuples]
regions = kwargs.get('regions')
if regions:
kwargs['regions'] = regions.split(',')
plot_coverage_in_regions(**kwargs)
| mit |
markelg/xray | xray/core/ops.py | 2 | 15861 | from functools import partial
import contextlib
import inspect
import operator
import warnings
import numpy as np
import pandas as pd
from . import npcompat
from .pycompat import PY3, range, dask_array_type
from .nputils import (
nanfirst, nanlast, interleaved_concat as _interleaved_concat_numpy,
array_eq, array_ne, _validate_axis, _calc_concat_shape
)
try:
import bottleneck as bn
except ImportError:
# use numpy methods instead
bn = np
try:
import dask.array as da
has_dask = True
except ImportError:
has_dask = False
UNARY_OPS = ['neg', 'pos', 'abs', 'invert']
CMP_BINARY_OPS = ['lt', 'le', 'ge', 'gt']
NUM_BINARY_OPS = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod',
'pow', 'and', 'xor', 'or']
if not PY3:
NUM_BINARY_OPS.append('div')
# methods which pass on the numpy return value unchanged
# be careful not to list methods that we would want to wrap later
NUMPY_SAME_METHODS = ['item', 'searchsorted']
# methods which don't modify the data shape, so the result should still be
# wrapped in an Variable/DataArray
NUMPY_UNARY_METHODS = ['astype', 'argsort', 'clip', 'conj', 'conjugate']
PANDAS_UNARY_FUNCTIONS = ['isnull', 'notnull']
# methods which remove an axis
NUMPY_REDUCE_METHODS = ['all', 'any']
NAN_REDUCE_METHODS = ['argmax', 'argmin', 'max', 'min', 'mean', 'prod', 'sum',
'std', 'var', 'median']
# TODO: wrap cumprod/cumsum, take, dot, sort
def _dask_or_eager_func(name, eager_module=np, dispatch_elemwise=False):
if has_dask:
def f(data, *args, **kwargs):
target = data[0] if dispatch_elemwise else data
module = da if isinstance(target, da.Array) else eager_module
return getattr(module, name)(data, *args, **kwargs)
else:
def f(data, *args, **kwargs):
return getattr(eager_module, name)(data, *args, **kwargs)
return f
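# Usage sketch for _dask_or_eager_func (toy values; ``np`` is numpy as imported above):
# the wrapper dispatches to dask.array for dask inputs and to the eager module otherwise.
# >>> _sum = _dask_or_eager_func('sum')
# >>> _sum(np.arange(4))
# 6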
def _fail_on_dask_array_input(values, msg=None, func_name=None):
if isinstance(values, dask_array_type):
if msg is None:
msg = '%r is not a valid method on dask arrays'
if func_name is None:
func_name = inspect.stack()[1][3]
raise NotImplementedError(msg % func_name)
around = _dask_or_eager_func('around')
isclose = _dask_or_eager_func('isclose')
isnull = _dask_or_eager_func('isnull', pd)
notnull = _dask_or_eager_func('notnull', pd)
transpose = _dask_or_eager_func('transpose')
where = _dask_or_eager_func('where')
insert = _dask_or_eager_func('insert')
take = _dask_or_eager_func('take')
broadcast_to = _dask_or_eager_func('broadcast_to', npcompat)
concatenate = _dask_or_eager_func('concatenate', dispatch_elemwise=True)
stack = _dask_or_eager_func('stack', npcompat, dispatch_elemwise=True)
def _interleaved_indices_required(indices):
"""With dask, we care about data locality and would rather avoid splitting
splitting up each arrays into single elements. This routine checks to see
if we really need the "interleaved" part of interleaved_concat.
We don't use for the pure numpy version of interleaved_concat, because it's
just as fast or faster to directly do the interleaved concatenate rather
than check if we could simply it.
"""
next_expected = 0
for ind in indices:
if isinstance(ind, slice):
if ((ind.start or 0) != next_expected
or ind.step not in (1, None)):
return True
next_expected = ind.stop
else:
ind = np.asarray(ind)
expected = np.arange(next_expected, next_expected + ind.size)
if (ind != expected).any():
return True
next_expected = ind[-1] + 1
return False
def _interleaved_concat_slow(arrays, indices, axis=0):
"""A slow version of interleaved_concat that also works on dask arrays
"""
axis = _validate_axis(arrays[0], axis)
result_shape = _calc_concat_shape(arrays, axis=axis)
length = result_shape[axis]
array_lookup = np.empty(length, dtype=int)
element_lookup = np.empty(length, dtype=int)
for n, ind in enumerate(indices):
if isinstance(ind, slice):
ind = np.arange(*ind.indices(length))
for m, i in enumerate(ind):
array_lookup[i] = n
element_lookup[i] = m
split_arrays = [arrays[n][(slice(None),) * axis + (slice(m, m + 1),)]
for (n, m) in zip(array_lookup, element_lookup)]
return concatenate(split_arrays, axis)
def interleaved_concat(arrays, indices, axis=0):
"""Concatenate each array along the given axis, but also assign each array
element into the location given by indices. This operation is used for
groupby.transform.
"""
if has_dask and isinstance(arrays[0], da.Array):
if not _interleaved_indices_required(indices):
return da.concatenate(arrays, axis)
else:
return _interleaved_concat_slow(arrays, indices, axis)
else:
return _interleaved_concat_numpy(arrays, indices, axis)
def asarray(data):
return data if isinstance(data, dask_array_type) else np.asarray(data)
def as_like_arrays(*data):
if all(isinstance(d, dask_array_type) for d in data):
return data
else:
return tuple(np.asarray(d) for d in data)
def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):
"""Like np.allclose, but also allows values to be NaN in both arrays
"""
arr1, arr2 = as_like_arrays(arr1, arr2)
if arr1.shape != arr2.shape:
return False
return bool(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())
def array_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in both arrays
"""
arr1, arr2 = as_like_arrays(arr1, arr2)
if arr1.shape != arr2.shape:
return False
return bool(((arr1 == arr2) | (isnull(arr1) & isnull(arr2))).all())
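# Usage sketch for the equivalence helpers (toy values only):
# >>> array_equiv(np.array([1.0, np.nan]), np.array([1.0, np.nan]))   # NaNs compare as equal here
# True
# >>> allclose_or_equiv([1.0, 2.0], [1.0, 2.0 + 1e-9])
# True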
def _call_possibly_missing_method(arg, name, args, kwargs):
try:
method = getattr(arg, name)
except AttributeError:
_fail_on_dask_array_input(arg, func_name=name)
if hasattr(arg, 'data'):
_fail_on_dask_array_input(arg.data, func_name=name)
raise
else:
return method(*args, **kwargs)
def _values_method_wrapper(name):
def func(self, *args, **kwargs):
return _call_possibly_missing_method(self.data, name, args, kwargs)
func.__name__ = name
func.__doc__ = getattr(np.ndarray, name).__doc__
return func
def _method_wrapper(name):
def func(self, *args, **kwargs):
return _call_possibly_missing_method(self, name, args, kwargs)
func.__name__ = name
func.__doc__ = getattr(np.ndarray, name).__doc__
return func
def _func_slash_method_wrapper(f, name=None):
# try to wrap a method, but if not found use the function
# this is useful when patching in a function as both a DataArray and
# Dataset method
if name is None:
name = f.__name__
def func(self, *args, **kwargs):
try:
return getattr(self, name)(*args, **kwargs)
except AttributeError:
return f(self, *args, **kwargs)
func.__name__ = name
func.__doc__ = f.__doc__
return func
_REDUCE_DOCSTRING_TEMPLATE = \
"""Reduce this {cls}'s data by applying `{name}` along some
dimension(s).
Parameters
----------
{extra_args}
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `{name}`.
Returns
-------
reduced : {cls}
New {cls} object with `{name}` applied to its data and the
indicated dimension(s) removed.
"""
def count(data, axis=None):
"""Count the number of non-NA in this array along the given axis or axes
"""
return sum(~isnull(data), axis=axis)
def fillna(data, other):
"""Fill missing values in this object with data from the other object.
Follows normal broadcasting and alignment rules.
"""
return where(isnull(data), other, data)
def where_method(data, cond, other=np.nan):
"""Select values from this object that are True in cond. Everything else
gets masked with other. Follows normal broadcasting and alignment rules.
"""
return where(cond, data, other)
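# Usage sketch for fillna / where_method (toy values only):
# >>> fillna(np.array([1.0, np.nan, 3.0]), 0.0)                               # -> array([1., 0., 3.])
# >>> where_method(np.array([1.0, 2.0]), np.array([True, False]), other=-1.0)  # -> array([1., -1.])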
@contextlib.contextmanager
def _ignore_warnings_if(condition):
if condition:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
yield
else:
yield
def _create_nan_agg_method(name, numeric_only=False, coerce_strings=False):
def f(values, axis=None, skipna=None, **kwargs):
# ignore keyword args inserted by np.mean and other numpy aggregators
# automatically:
kwargs.pop('dtype', None)
kwargs.pop('out', None)
values = asarray(values)
if coerce_strings and values.dtype.kind in 'SU':
values = values.astype(object)
if skipna or (skipna is None and values.dtype.kind == 'f'):
if values.dtype.kind not in ['i', 'f']:
raise NotImplementedError(
'skipna=True not yet implemented for %s with dtype %s'
% (name, values.dtype))
nanname = 'nan' + name
if isinstance(axis, tuple) or not values.dtype.isnative:
# bottleneck can't handle multiple axis arguments or non-native
# endianness
eager_module = np
else:
eager_module = bn
func = _dask_or_eager_func(nanname, eager_module)
using_numpy_nan_func = eager_module is np
else:
func = _dask_or_eager_func(name)
using_numpy_nan_func = False
with _ignore_warnings_if(using_numpy_nan_func):
try:
return func(values, axis=axis, **kwargs)
except AttributeError:
if isinstance(values, dask_array_type):
msg = '%s is not yet implemented on dask arrays' % name
else:
assert using_numpy_nan_func
msg = ('%s is not available with skipna=False with the '
'installed version of numpy; upgrade to numpy 1.9 '
'or newer to use skipna=True or skipna=None' % name)
raise NotImplementedError(msg)
f.numeric_only = numeric_only
return f
argmax = _create_nan_agg_method('argmax', coerce_strings=True)
argmin = _create_nan_agg_method('argmin', coerce_strings=True)
max = _create_nan_agg_method('max', coerce_strings=True)
min = _create_nan_agg_method('min', coerce_strings=True)
sum = _create_nan_agg_method('sum', numeric_only=True)
mean = _create_nan_agg_method('mean', numeric_only=True)
std = _create_nan_agg_method('std', numeric_only=True)
var = _create_nan_agg_method('var', numeric_only=True)
median = _create_nan_agg_method('median', numeric_only=True)
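# Usage sketch for the nan-aware aggregations defined above (toy values only;
# note that ``sum``, ``max`` etc. shadow the builtins at module level):
# >>> mean(np.array([1.0, np.nan, 3.0]))                # floats skip NaN by default
# 2.0
# >>> max(np.array([1, 5, 3]), axis=0)                  # integer input uses plain np.max
# 5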
_fail_on_dask_array_input_skipna = partial(
_fail_on_dask_array_input,
msg='%r with skipna=True is not yet implemented on dask arrays')
_prod = _dask_or_eager_func('prod')
def prod(values, axis=None, skipna=None, **kwargs):
if skipna or (skipna is None and values.dtype.kind == 'f'):
if values.dtype.kind not in ['i', 'f']:
raise NotImplementedError(
'skipna=True not yet implemented for prod with dtype %s'
% values.dtype)
_fail_on_dask_array_input_skipna(values)
return npcompat.nanprod(values, axis=axis, **kwargs)
return _prod(values, axis=axis, **kwargs)
prod.numeric_only = True
def first(values, axis, skipna=None):
"""Return the first non-NA elements in this array along the given axis
"""
if (skipna or skipna is None) and values.dtype.kind not in 'iSU':
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanfirst(values, axis)
return take(values, 0, axis=axis)
def last(values, axis, skipna=None):
"""Return the last non-NA elements in this array along the given axis
"""
if (skipna or skipna is None) and values.dtype.kind not in 'iSU':
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanlast(values, axis)
return take(values, -1, axis=axis)
def inject_reduce_methods(cls):
methods = ([(name, getattr(np, name), False) for name
in NUMPY_REDUCE_METHODS]
+ [(name, globals()[name], True) for name
in NAN_REDUCE_METHODS]
+ [('count', count, False)])
for name, f, include_skipna in methods:
numeric_only = getattr(f, 'numeric_only', False)
func = cls._reduce_method(f, include_skipna, numeric_only)
func.__name__ = name
func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format(
name=name, cls=cls.__name__,
extra_args=cls._reduce_extra_args_docstring)
setattr(cls, name, func)
def op_str(name):
return '__%s__' % name
def get_op(name):
return getattr(operator, op_str(name))
NON_INPLACE_OP = dict((get_op('i' + name), get_op(name))
for name in NUM_BINARY_OPS)
def inplace_to_noninplace_op(f):
return NON_INPLACE_OP[f]
def inject_binary_ops(cls, inplace=False):
for name in CMP_BINARY_OPS + NUM_BINARY_OPS:
setattr(cls, op_str(name), cls._binary_op(get_op(name)))
for name, f in [('eq', array_eq), ('ne', array_ne)]:
setattr(cls, op_str(name), cls._binary_op(f))
# patch in fillna
f = _func_slash_method_wrapper(fillna)
method = cls._binary_op(f, join='left', drop_na_vars=False)
setattr(cls, '_fillna', method)
# patch in where
f = _func_slash_method_wrapper(where_method, 'where')
setattr(cls, '_where', cls._binary_op(f))
for name in NUM_BINARY_OPS:
# only numeric operations have in-place and reflexive variants
setattr(cls, op_str('r' + name),
cls._binary_op(get_op(name), reflexive=True))
if inplace:
setattr(cls, op_str('i' + name),
cls._inplace_binary_op(get_op('i' + name)))
def inject_all_ops_and_reduce_methods(cls, priority=50, array_only=True):
# prioritize our operations over those of numpy.ndarray (priority=1)
# and numpy.matrix (priority=10)
cls.__array_priority__ = priority
# patch in standard special operations
for name in UNARY_OPS:
setattr(cls, op_str(name), cls._unary_op(get_op(name)))
inject_binary_ops(cls, inplace=True)
# patch in numpy/pandas methods
for name in NUMPY_UNARY_METHODS:
setattr(cls, name, cls._unary_op(_method_wrapper(name)))
for name in PANDAS_UNARY_FUNCTIONS:
f = _func_slash_method_wrapper(getattr(pd, name))
setattr(cls, name, cls._unary_op(f))
f = _func_slash_method_wrapper(around, name='round')
setattr(cls, 'round', cls._unary_op(f))
if array_only:
# these methods don't return arrays of the same shape as the input, so
# don't try to patch these in for Dataset objects
for name in NUMPY_SAME_METHODS:
setattr(cls, name, _values_method_wrapper(name))
inject_reduce_methods(cls)
| apache-2.0 |
pratapvardhan/scikit-learn | examples/decomposition/plot_pca_vs_lda.py | 176 | 2027 | """
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the first 2 principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
lda = LinearDiscriminantAnalysis(n_components=2)
X_r2 = lda.fit(X, y).transform(X)
# Percentage of variance explained for each component
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of IRIS dataset')
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], target_names):
plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('LDA of IRIS dataset')
plt.show()
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
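# Minimal usage sketch for the estimators re-exported above (toy data, illustrative only):
# >>> from sklearn.linear_model import Ridge
# >>> Ridge(alpha=1.0).fit([[0.0], [1.0], [2.0]], [0.0, 1.0, 2.0]).coef_.shape
# (1,)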
| bsd-3-clause |
mdhaber/scipy | scipy/ndimage/interpolation.py | 12 | 35344 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import warnings
import numpy
from numpy.core.multiarray import normalize_axis_index
from scipy import special
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docfiller
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
mode='mirror'):
"""
Calculate a 1-D spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is ``numpy.float64``.
%(mode_interp_mirror)s
Returns
-------
spline_filter1d : ndarray
The filtered input.
Notes
-----
All functions in `ndimage.interpolation` do spline interpolation of
the input image. If using B-splines of `order > 1`, the input image
values have to be converted to B-spline coefficients first, which is
done by applying this 1-D filter sequentially along all
axes of the input. All functions that require B-spline coefficients
will automatically filter their inputs, a behavior controllable with
the `prefilter` keyword argument. For functions that accept a `mode`
parameter, the result will only be correct if it matches the `mode`
used when filtering.
For complex-valued `input`, this function processes the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
See Also
--------
spline_filter : Multidimensional spline filter.
Examples
--------
We can filter an image using a 1-D spline along the given axis:
>>> from scipy.ndimage import spline_filter1d
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter_axis_0 = spline_filter1d(orig_img, axis=0)
>>> sp_filter_axis_1 = spline_filter1d(orig_img, axis=1)
>>> f, ax = plt.subplots(1, 3, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter_axis_0, "spline filter (axis=0)"],
... [sp_filter_axis_1, "spline filter (axis=1)"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
spline_filter1d(input.real, order, axis, output.real, mode)
spline_filter1d(input.imag, order, axis, output.imag, mode)
return output
if order in [0, 1]:
output[...] = numpy.array(input)
else:
mode = _ni_support._extend_mode_to_code(mode)
axis = normalize_axis_index(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output, mode)
return output
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
"""
Multidimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d : Calculate a 1-D spline filter along the given axis.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
For complex-valued `input`, this function processes the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
We can filter an image using multidimensional splines:
>>> from scipy.ndimage import spline_filter
>>> import matplotlib.pyplot as plt
>>> orig_img = np.eye(20) # create an image
>>> orig_img[10, :] = 1.0
>>> sp_filter = spline_filter(orig_img, order=3)
>>> f, ax = plt.subplots(1, 2, sharex=True)
>>> for ind, data in enumerate([[orig_img, "original image"],
... [sp_filter, "spline filter"]]):
... ax[ind].imshow(data[0], cmap='gray_r')
... ax[ind].set_title(data[1])
>>> plt.tight_layout()
>>> plt.show()
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
spline_filter(input.real, order, output.real, mode)
spline_filter(input.imag, order, output.imag, mode)
return output
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output, mode=mode)
input = output
else:
output[...] = input[...]
return output
def _prepad_for_spline_filter(input, mode, cval):
if mode in ['nearest', 'grid-constant']:
npad = 12
if mode == 'grid-constant':
padded = numpy.pad(input, npad, mode='constant',
constant_values=cval)
elif mode == 'nearest':
padded = numpy.pad(input, npad, mode='edge')
else:
# other modes have exact boundary conditions implemented so
# no prepadding is needed
npad = 0
padded = input
return padded, npad
@docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
This function also accepts low-level callback functions with one
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the Python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
For complex-valued `input`, this function transforms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(order=order, mode=mode, prefilter=prefilter,
output_shape=output_shape,
extra_arguments=extra_arguments,
extra_keywords=extra_keywords)
geometric_transform(input.real, mapping, output=output.real,
cval=numpy.real(cval), **kwargs)
geometric_transform(input.imag, mapping, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, npad, extra_arguments,
extra_keywords)
return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Notes
-----
For complex-valued `input`, this function maps the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
map_coordinates(input.real, coordinates, output=output.real,
cval=numpy.real(cval), **kwargs)
map_coordinates(input.imag, coordinates, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, npad, None, None)
return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
This does 'pull' (or 'backward') resampling, transforming the output space
to the input to locate data. Affine transformations are often described in
the 'push' (or 'forward') direction, transforming input to output. If you
have a matrix for the 'push' transformation, use its inverse
(:func:`numpy.linalg.inv`) in this function.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2-D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a 1-D or a
2-D array. If a 1-D array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
For complex-valued `input`, this function transforms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if output_shape is None:
if isinstance(output, numpy.ndarray):
output_shape = output.shape
else:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
kwargs = dict(offset=offset, output_shape=output_shape, order=order,
mode=mode, prefilter=prefilter)
affine_transform(input.real, matrix, output=output.real,
cval=numpy.real(cval), **kwargs)
affine_transform(input.imag, matrix, output=output.imag,
cval=numpy.imag(cval), **kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behavior of affine_transform with a 1-D "
"array supplied for the matrix parameter has changed in "
"SciPy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval, npad, False)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, npad, None,
None)
return output
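# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# The Notes above say each output coordinate ``o`` is looked up at the input
# coordinate ``matrix @ o + offset``.  The hypothetical helper below shows that
# a 2x2 matrix plus an explicit offset is equivalent to a single 3x3
# homogeneous matrix whose last column carries the offset.
def _affine_transform_usage_sketch():
    import numpy
    img = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
    # Pure translation: output[o] = input[o + offset]
    a = affine_transform(img, numpy.eye(2), offset=(1, 0), order=1)
    # The same transform as a homogeneous matrix (bottom row must be [0, 0, 1])
    hom = numpy.array([[1., 0., 1.],
                       [0., 1., 0.],
                       [0., 0., 1.]])
    b = affine_transform(img, hom, order=1)
    assert numpy.allclose(a, b)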
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
Notes
-----
For complex-valued `input`, this function shifts the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if complex_output:
# import under different name to avoid confusion with shift parameter
from scipy.ndimage.interpolation import shift as _shift
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
_shift(input.real, shift, output=output.real, cval=numpy.real(cval),
**kwargs)
_shift(input.imag, shift, output=output.imag, cval=numpy.imag(cval),
**kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
mode = _ni_support._extend_mode_to_code(mode)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval,
npad, False)
return output
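# --- Illustrative sketch (editorial addition, not part of the original module) ---
# As the implementation above shows (``shift = [-ii for ii in shift]``),
# shifting by ``s`` is equivalent to an identity affine transform with
# ``offset = -s``: content moves towards higher indices for positive shifts.
def _shift_usage_sketch():
    import numpy
    x = numpy.zeros(7)
    x[3] = 1.0
    shifted = shift(x, 2, order=0)  # the peak moves from index 3 to index 5
    equivalent = affine_transform(x, numpy.eye(1), offset=-2, order=0)
    assert shifted[5] == 1.0
    assert numpy.allclose(shifted, equivalent)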
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True, *, grid_mode=False):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
grid_mode : bool, optional
If False, the distance from the pixel centers is zoomed. Otherwise, the
distance including the full pixel extent is used. For example, a 1d
signal of length 5 is considered to have length 4 when `grid_mode` is
False, but length 5 when `grid_mode` is True. See the following
visual illustration:
.. code-block:: text
| pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
|<-------------------------------------->|
vs.
|<----------------------------------------------->|
The starting point of the arrow in the diagram above corresponds to
coordinate location 0 in each mode.
Returns
-------
zoom : ndarray
The zoomed input.
Notes
-----
For complex-valued `input`, this function zooms the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent, vmin=0, vmax=255)
>>> ax2.imshow(result, vmin=0, vmax=255)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
complex_output = numpy.iscomplexobj(input)
output = _ni_support._get_output(output, input, shape=output_shape,
complex_output=complex_output)
if complex_output:
# import under different name to avoid confusion with zoom parameter
from scipy.ndimage.interpolation import zoom as _zoom
kwargs = dict(order=order, mode=mode, prefilter=prefilter)
_zoom(input.real, zoom, output=output.real, cval=numpy.real(cval),
**kwargs)
_zoom(input.imag, zoom, output=output.imag, cval=numpy.imag(cval),
**kwargs)
return output
if prefilter and order > 1:
padded, npad = _prepad_for_spline_filter(input, mode, cval)
filtered = spline_filter(padded, order, output=numpy.float64,
mode=mode)
else:
npad = 0
filtered = input
if grid_mode:
# warn about modes that may have surprising behavior
suggest_mode = None
if mode == 'constant':
suggest_mode = 'grid-constant'
elif mode == 'wrap':
suggest_mode = 'grid-wrap'
if suggest_mode is not None:
warnings.warn(
("It is recommended to use mode = {} instead of {} when "
"grid_mode is True."
).format(suggest_mode, mode)
)
mode = _ni_support._extend_mode_to_code(mode)
zoom_div = numpy.array(output_shape)
zoom_nominator = numpy.array(input.shape)
if not grid_mode:
zoom_div -= 1
zoom_nominator -= 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(zoom_nominator, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval, npad,
grid_mode)
return output
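# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The output shape is ``round(size * zoom)`` per axis, and with
# ``grid_mode=False`` the effective factor is recomputed above as
# ``(in_size - 1) / (out_size - 1)`` so the first and last samples of each
# axis are preserved exactly.
def _zoom_usage_sketch():
    import numpy
    x = numpy.arange(5, dtype=numpy.float64)  # values 0..4
    y = zoom(x, 2, order=1)                   # length round(5 * 2) = 10
    assert y.shape == (10,)
    assert y[0] == x[0] and y[-1] == x[-1]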
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode_interp_constant)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
Notes
-----
For complex-valued `input`, this function rotates the real and imaginary
components independently.
.. versionadded:: 1.6.0
Complex-valued support added.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(10, 3))
>>> ax1, ax2, ax3 = fig.subplots(1, 3)
>>> img = misc.ascent()
>>> img_45 = ndimage.rotate(img, 45, reshape=False)
>>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
>>> ax1.imshow(img, cmap='gray')
>>> ax1.set_axis_off()
>>> ax2.imshow(img_45, cmap='gray')
>>> ax2.set_axis_off()
>>> ax3.imshow(full_img_45, cmap='gray')
>>> ax3.set_axis_off()
>>> fig.set_tight_layout(True)
>>> plt.show()
>>> print(img.shape)
(512, 512)
>>> print(img_45.shape)
(512, 512)
>>> print(full_img_45.shape)
(724, 724)
"""
input_arr = numpy.asarray(input)
ndim = input_arr.ndim
if ndim < 2:
raise ValueError('input array should be at least 2D')
axes = list(axes)
if len(axes) != 2:
raise ValueError('axes should contain exactly two values')
if not all([float(ax).is_integer() for ax in axes]):
raise ValueError('axes should contain only integer values')
if axes[0] < 0:
axes[0] += ndim
if axes[1] < 0:
axes[1] += ndim
if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
raise ValueError('invalid rotation plane specified')
axes.sort()
c, s = special.cosdg(angle), special.sindg(angle)
rot_matrix = numpy.array([[c, s],
[-s, c]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy],
[0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
offset = in_center - out_center
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
complex_output = numpy.iscomplexobj(input_arr)
output = _ni_support._get_output(output, input_arr, shape=output_shape,
complex_output=complex_output)
if ndim <= 2:
affine_transform(input_arr, rot_matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
# If ndim > 2, the rotation is applied over all the planes
# parallel to axes
planes_coord = itertools.product(
*[[slice(None)] if ax in axes else range(img_shape[ax])
for ax in range(ndim)])
out_plane_shape = tuple(out_plane_shape)
for coordinates in planes_coord:
ia = input_arr[coordinates]
oa = output[coordinates]
affine_transform(ia, rot_matrix, offset, out_plane_shape,
oa, order, mode, cval, prefilter)
return output
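# --- Illustrative sketch (editorial addition, not part of the original module) ---
# With ``reshape=True`` the in-plane output shape is the peak-to-peak extent
# of the rotated input corners (plus 0.5, truncated to int), exactly as
# computed above.  For the 512x512 docstring example rotated by 45 degrees,
# 512 * (cos(45) + sin(45)) ~= 724.1, hence the reported (724, 724).
def _rotate_output_shape_sketch(angle_deg=45.0, in_plane_shape=(512, 512)):
    import numpy
    rad = numpy.deg2rad(angle_deg)
    c, s = numpy.cos(rad), numpy.sin(rad)
    rot = numpy.array([[c, s], [-s, c]])
    iy, ix = in_plane_shape
    corners = rot.dot(numpy.array([[0, 0, iy, iy],
                                   [0, ix, 0, ix]], dtype=numpy.float64))
    return tuple((numpy.ptp(corners, axis=1) + 0.5).astype(int))  # (724, 724)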
| bsd-3-clause |
mattilyra/scikit-learn | benchmarks/bench_mnist.py | 38 | 6799 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 - from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
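# --- Example invocation (editorial addition, not part of the benchmark) ---
# The flags below mirror the argparse options defined above; exact runtimes
# and error rates depend on the machine and the scikit-learn version:
#   python bench_mnist.py --classifiers ExtraTrees Nystroem-SVM --n-jobs 4
#   python bench_mnist.py --classifiers MLP-adam CART dummy --order C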
| bsd-3-clause |
AlexSafatli/Pylogeny | setup.py | 1 | 1813 | ''' Installation script for Pylogeny. '''
# Date: Oct 16 2014
# Author: Alex Safatli
# E-mail: [email protected]
from setuptools import setup, Extension as extension
import os
# Metadata
from pylogeny.__version__ import VERSION
DESCRIP = 'A code framework for phylogenetic tree reconstruction, rearrangement, scoring, and for the manipulation, heuristic search, and analysis of the phylogenetic tree combinatorial space.'
LONG = 'A Python library and code framework for phylogenetic tree reconstruction, rearrangement, scoring, and for the manipulation and analysis of the phylogenetic tree combinatorial space. Also possesses features to execute popular heuristic programs such as FastTree and RAxML to acquire approximate ML trees.'
URL = 'http://www.github.com/AlexSafatli/Pylogeny'
AUTHOR = 'Alex Safatli'
EMAIL = '[email protected]'
DEPNDS = ['networkx','pandas','mysql-python','p4']
LINKS = ['http://p4-phylogenetics.googlecode.com/archive/4491de464e68fdb49c7a11e06737cd34a98143ec.tar.gz#egg=p4']
PKGDATA = {'pylogeny':['fitch.cpp','libpllWrapper.c']}
FITCHCC = os.path.join('pylogeny','fitch.cpp')
PLLC = os.path.join('pylogeny','libpllWrapper.c')
# Compilation for C/C++ Extensions (Fitch, Pylibpll)
pllExtension = extension('libpllWrapper',sources=[PLLC],include_dirs=['/usr/local/include'],libraries=['pll-sse3'],library_dirs=['/usr/local/lib'])
fitchExtension = extension('fitch',sources=[FITCHCC],include_dirs=['/usr/local/include'],language="c++",extra_compile_args=['-std=c++11'])
# Setup
setup(name='pylogeny',version=VERSION,description=DESCRIP,long_description=LONG,url=URL,author=AUTHOR,author_email=EMAIL,license='MIT',packages=['pylogeny'],package_data=PKGDATA,ext_modules=[pllExtension,fitchExtension],dependency_links=LINKS,install_requires=DEPNDS,zip_safe=False) | gpl-2.0 |
FireCARES/firecares | firecares/firestation/management/commands/import-domains.py | 1 | 1497 | import argparse
import pandas as pd
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
class Command(BaseCommand):
help = """Imports domain names from a csv roster file into FireCARES. The CSV file is expected
to have the headers 'DepartmentID' and 'Email'"""
def add_arguments(self, parser):
parser.add_argument('file', help='source filename for a roster in CSV format', type=argparse.FileType('r'))
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Do not execute update statements.',
)
def handle(self, *args, **options):
dry_run = options.get('dry_run')
csv = pd.read_csv(options['file'])
cols = ['DepartmentID', 'Email']
items = csv.groupby(cols)['DepartmentID', 'Email'].sum()
for i, data in enumerate(items.iterrows()):
fdid = data[0][0]
domain = data[0][1].split('@')[1]
self.stdout.write(str(fdid) + ' - ' + domain)
try:
fd = FireDepartment.objects.get(id=fdid)
fd.domain_name = domain
if not dry_run:
fd.save()
except ObjectDoesNotExist:
self.stdout.write('WARNING: FireDepartment not found: ' + str(fdid))
self.stdout.write('...done')
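# --- Example roster format (editorial addition, hypothetical data) ---
# The command above expects a CSV with at least these two columns; the domain
# is taken from everything after the '@' in the Email column:
#   DepartmentID,Email
#   93345,[email protected]
#   77549,[email protected]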
| mit |
nvoron23/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
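    # --- Illustrative check (editorial addition, not part of scikit-learn) ---
    # The docstring's kernel K(D) = -0.5 * (I - 1/n) * D**2 * (I - 1/n) is the
    # classical double-centering that KernelCenterer applies to G = -0.5 * D**2:
    #   import numpy as np
    #   from sklearn.preprocessing import KernelCenterer
    #   D = np.random.RandomState(0).rand(6, 6); D = D + D.T
    #   n = D.shape[0]; J = np.eye(n) - np.ones((n, n)) / n
    #   assert np.allclose(-0.5 * J.dot(D ** 2).dot(J),
    #                      KernelCenterer().fit_transform(-0.5 * D ** 2))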
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
AIML/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
unoebauer/tardis | tardis/model.py | 1 | 27296 | # This module contains the model class
import logging
import os
import itertools
import numpy as np
import pandas as pd
from astropy import constants, units as u
import scipy.special
from tardis import packet_source, plasma_array
from tardis.montecarlo import montecarlo
from util import intensity_black_body
logger = logging.getLogger(__name__)
c = constants.c.cgs.value
h = constants.h.cgs.value
kb = constants.k_B.cgs.value
w_estimator_constant = (c ** 2 / (2 * h)) * (15 / np.pi ** 4) * (h / kb) ** 4 / (4 * np.pi)
t_rad_estimator_constant = (np.pi**4 / (15 * 24 * scipy.special.zeta(5, 1))) * h / kb
class Radial1DModel(object):
"""
    Class to hold the state of the individual shells: the state of the plasma (as a `~plasma.BasePlasma` object or one of
    its subclasses), the plasma parameters (e.g. temperature, dilution factor), and the dimensions of the shells.
Parameters
----------
tardis_configuration : `tardis.config_reader.Configuration`
velocities : `np.ndarray`
an array with n+1 (for n shells) velocities (in cm/s) for each of the boundaries (velocities[0] describing
        the inner boundary and velocities[-1] the outer boundary)
densities : `np.ndarray`
an array with n densities - being the density mid-shell (assumed for the whole shell)
abundances : `list` or `dict`
a dictionary for uniform abundances throughout all shells, e.g. dict(Fe=0.5, Si=0.5)
        For different abundances in each shell, a list of abundance dictionaries.
time_explosion : `float`
time since explosion in seconds
atom_data : `~tardis.atom_data.AtomData` class or subclass
Containing the atom data needed for the plasma calculations
ws : `None` or `list`-like
        ws can only be specified for plasma_type 'nebular'. If `None` is given at first initialization, the class
        calculates an initial geometric dilution factor. When a list is given, positive values are used as-is, whereas
        negative values trigger the geometric calculation.
plasma_type : `str`
plasma type currently supports 'lte' (using `tardis.plasma.LTEPlasma`)
or 'nebular' (using `tardis.plasma.NebularPlasma`)
initial_t_rad : `float`-like or `list`-like
        initial radiative temperature for each shell. If a scalar is specified, all shells are initialized with a
        uniform temperature.
"""
@classmethod
def from_h5(cls, buffer_or_fname):
raise NotImplementedError("This is currently not implemented")
def __init__(self, tardis_config):
#final preparation for configuration object
self.tardis_config = tardis_config
self.gui = None
self.converged = False
self.atom_data = tardis_config.atom_data
selected_atomic_numbers = self.tardis_config.abundances.index
self.atom_data.prepare_atom_data(selected_atomic_numbers,
line_interaction_type=tardis_config.plasma.line_interaction_type,
nlte_species=tardis_config.plasma.nlte.species)
if tardis_config.plasma.ionization == 'nebular':
if not self.atom_data.has_zeta_data:
raise ValueError("Requiring Recombination coefficients Zeta for 'nebular' plasma ionization")
self.packet_src = packet_source.SimplePacketSource.from_wavelength(tardis_config.montecarlo.black_body_sampling.start,
tardis_config.montecarlo.black_body_sampling.end,
blackbody_sampling=tardis_config.montecarlo.black_body_sampling.samples,
seed=self.tardis_config.montecarlo.seed)
self.current_no_of_packets = tardis_config.montecarlo.no_of_packets
self.t_inner = tardis_config.plasma.t_inner
self.t_rads = tardis_config.plasma.t_rads
self.iterations_max_requested = tardis_config.montecarlo.iterations
self.iterations_remaining = self.iterations_max_requested
self.iterations_executed = 0
if tardis_config.montecarlo.convergence_strategy.type == 'specific':
self.global_convergence_parameters = (tardis_config.montecarlo.
convergence_strategy.
deepcopy())
self.t_rads = tardis_config.plasma.t_rads
t_inner_lock_cycle = [False] * (tardis_config.montecarlo.
convergence_strategy.
lock_t_inner_cycles)
t_inner_lock_cycle[0] = True
self.t_inner_update = itertools.cycle(t_inner_lock_cycle)
self.ws = (0.5 * (1 - np.sqrt(1 -
(tardis_config.structure.r_inner[0] ** 2 / tardis_config.structure.r_middle ** 2).to(1).value)))
self.plasma_array = plasma_array.BasePlasmaArray(tardis_config.number_densities, tardis_config.atom_data,
tardis_config.supernova.time_explosion.to('s').value,
nlte_config=tardis_config.plasma.nlte,
delta_treatment=tardis_config.plasma.delta_treatment,
ionization_mode=tardis_config.plasma.ionization,
excitation_mode=tardis_config.plasma.excitation)
self.spectrum = TARDISSpectrum(tardis_config.spectrum.frequency, tardis_config.supernova.distance)
self.spectrum_virtual = TARDISSpectrum(tardis_config.spectrum.frequency, tardis_config.supernova.distance)
self.spectrum_reabsorbed = TARDISSpectrum(tardis_config.spectrum.frequency, tardis_config.supernova.distance)
@property
def line_interaction_type(self):
return self._line_interaction_type
@line_interaction_type.setter
def line_interaction_type(self, value):
if value in ['scatter', 'downbranch', 'macroatom']:
self._line_interaction_type = value
self.tardis_config.plasma.line_interaction_type = value
#final preparation for atom_data object - currently building data
self.atom_data.prepare_atom_data(self.tardis_config.number_densities.columns,
line_interaction_type=self.line_interaction_type, max_ion_number=None,
nlte_species=self.tardis_config.plasma.nlte.species)
else:
raise ValueError('line_interaction_type can only be "scatter", "downbranch", or "macroatom"')
@property
def t_inner(self):
return self._t_inner
@t_inner.setter
def t_inner(self, value):
self._t_inner = value
self.luminosity_inner = (4 * np.pi * constants.sigma_sb.cgs * self.tardis_config.structure.r_inner[0] ** 2 * \
self.t_inner ** 4).to('erg/s')
self.time_of_simulation = (1.0 * u.erg / self.luminosity_inner)
self.j_blues_norm_factor = constants.c.cgs * self.tardis_config.supernova.time_explosion / \
(4 * np.pi * self.time_of_simulation * self.tardis_config.structure.volumes)
def calculate_j_blues(self, init_detailed_j_blues=False):
nus = self.atom_data.lines.nu.values
radiative_rates_type = self.tardis_config.plasma.radiative_rates_type
w_epsilon = self.tardis_config.plasma.w_epsilon
if radiative_rates_type == 'lte':
logger.info('Calculating J_blues for radiative_rates_type=lte')
j_blues = intensity_black_body(nus[np.newaxis].T, self.t_rads.value)
self.j_blues = pd.DataFrame(j_blues, index=self.atom_data.lines.index, columns=np.arange(len(self.t_rads)))
elif radiative_rates_type == 'dilute-blackbody' or init_detailed_j_blues:
logger.info('Calculating J_blues for radiative_rates_type=dilute-blackbody')
j_blues = self.ws * intensity_black_body(nus[np.newaxis].T, self.t_rads.value)
self.j_blues = pd.DataFrame(j_blues, index=self.atom_data.lines.index, columns=np.arange(len(self.t_rads)))
elif radiative_rates_type == 'detailed':
            logger.info('Calculating J_blues for radiative_rates_type=detailed')
self.j_blues = pd.DataFrame(self.j_blue_estimators.transpose() * self.j_blues_norm_factor.value,
index=self.atom_data.lines.index, columns=np.arange(len(self.t_rads)))
for i in xrange(self.tardis_config.structure.no_of_shells):
zero_j_blues = self.j_blues[i] == 0.0
self.j_blues[i][zero_j_blues] = w_epsilon * intensity_black_body(
self.atom_data.lines.nu.values[zero_j_blues], self.t_rads.value[i])
else:
            raise ValueError('radiative_rates_type unknown - %s' % radiative_rates_type)
def calculate_updated_radiationfield(self, nubar_estimator, j_estimator):
"""
        Calculate an updated radiation field from the :math:`\\bar{\\nu}_\\textrm{estimator}` and :math:`J_\\textrm{estimator}`
calculated in the montecarlo simulation. The details of the calculation can be found in the documentation.
Parameters
----------
nubar_estimator : ~np.ndarray (float)
j_estimator : ~np.ndarray (float)
Returns
-------
updated_t_rads : ~np.ndarray (float)
updated_ws : ~np.ndarray (float)
"""
updated_t_rads = t_rad_estimator_constant * nubar_estimator / j_estimator
updated_ws = j_estimator / (
4 * constants.sigma_sb.cgs.value * updated_t_rads ** 4 * self.time_of_simulation.value
* self.tardis_config.structure.volumes.value)
return updated_t_rads * u.K, updated_ws
def update_plasmas(self, initialize_nlte=False):
self.plasma_array.update_radiationfield(self.t_rads.value, self.ws, j_blues=self.j_blues,
initialize_nlte=initialize_nlte)
if self.tardis_config.plasma.line_interaction_type in ('downbranch', 'macroatom'):
self.transition_probabilities = self.plasma_array.calculate_transition_probabilities()
def update_radiationfield(self, log_sampling=5):
"""
Updating radiation field
"""
convergence_section = self.tardis_config.montecarlo.convergence_strategy
updated_t_rads, updated_ws = self.calculate_updated_radiationfield(self.nubar_estimators, self.j_estimators)
old_t_rads = self.t_rads.copy()
old_ws = self.ws.copy()
old_t_inner = self.t_inner
luminosity_wavelength_filter = (self.montecarlo_nu > self.tardis_config.supernova.luminosity_nu_start) & \
(self.montecarlo_nu < self.tardis_config.supernova.luminosity_nu_end)
emitted_filter = self.montecarlo_luminosity.value >= 0
emitted_luminosity = np.sum(self.montecarlo_luminosity.value[emitted_filter & luminosity_wavelength_filter]) \
* self.montecarlo_luminosity.unit
absorbed_luminosity = -np.sum(self.montecarlo_luminosity.value[~emitted_filter & luminosity_wavelength_filter]) \
* self.montecarlo_luminosity.unit
updated_t_inner = self.t_inner \
* (emitted_luminosity / self.tardis_config.supernova.luminosity_requested).to(1).value \
** convergence_section.t_inner_update_exponent
#updated_t_inner = np.max([np.min([updated_t_inner, 30000]), 3000])
convergence_t_rads = (abs(old_t_rads - updated_t_rads) / updated_t_rads).value
convergence_ws = (abs(old_ws - updated_ws) / updated_ws)
convergence_t_inner = (abs(old_t_inner - updated_t_inner) / updated_t_inner).value
if convergence_section.type == 'damped' or convergence_section.type == 'specific':
self.t_rads += convergence_section.t_rad.damping_constant * (updated_t_rads - self.t_rads)
self.ws += convergence_section.w.damping_constant * (updated_ws - self.ws)
if self.t_inner_update.next():
t_inner_new = self.t_inner + convergence_section.t_inner.damping_constant * (updated_t_inner - self.t_inner)
else:
t_inner_new = self.t_inner
if convergence_section.type == 'specific':
t_rad_converged = (float(np.sum(convergence_t_rads < convergence_section.t_rad['threshold'])) \
/ self.tardis_config.structure.no_of_shells) >= convergence_section.t_rad['fraction']
w_converged = (float(np.sum(convergence_t_rads < convergence_section.w['threshold'])) \
/ self.tardis_config.structure.no_of_shells) >= convergence_section.w['fraction']
t_inner_converged = convergence_t_inner < convergence_section.t_inner['threshold']
if t_rad_converged and t_inner_converged and w_converged:
if not self.converged:
self.converged = True
self.iterations_remaining = self.global_convergence_parameters['hold']
else:
if self.converged:
self.iterations_remaining = self.iterations_max_requested - self.iterations_executed
self.converged = False
self.temperature_logging = pd.DataFrame(
{'t_rads': old_t_rads.value, 'updated_t_rads': updated_t_rads.value,
'converged_t_rads': convergence_t_rads, 'new_trads': self.t_rads.value, 'ws': old_ws,
'updated_ws': updated_ws, 'converged_ws': convergence_ws,
'new_ws': self.ws})
self.temperature_logging.index.name = 'Shell'
temperature_logging = str(self.temperature_logging[::log_sampling])
temperature_logging = ''.join(['\t%s\n' % item for item in temperature_logging.split('\n')])
logger.info('Plasma stratification:\n%s\n', temperature_logging)
logger.info("Luminosity emitted = %.5e Luminosity absorbed = %.5e Luminosity requested = %.5e",
emitted_luminosity.value, absorbed_luminosity.value,
self.tardis_config.supernova.luminosity_requested.value)
logger.info('Calculating new t_inner = %.3f', updated_t_inner.value)
return t_inner_new
def simulate(self, update_radiation_field=True, enable_virtual=False, initialize_j_blues=False,
initialize_nlte=False):
"""
Run a simulation
"""
if update_radiation_field:
t_inner_new = self.update_radiationfield()
else:
t_inner_new = self.t_inner
self.calculate_j_blues(init_detailed_j_blues=initialize_j_blues)
self.update_plasmas(initialize_nlte=initialize_nlte)
self.t_inner = t_inner_new
self.packet_src.create_packets(self.current_no_of_packets, self.t_inner.value)
if enable_virtual:
no_of_virtual_packets = self.tardis_config.montecarlo.no_of_virtual_packets
else:
no_of_virtual_packets = 0
if np.any(np.isnan(self.plasma_array.tau_sobolevs.values)) or np.any(np.isinf(self.plasma_array.tau_sobolevs.values)) \
or np.any(np.isneginf(self.plasma_array.tau_sobolevs.values)):
raise ValueError('Some tau_sobolevs are nan, inf, -inf in tau_sobolevs. Something went wrong!')
self.j_blue_estimators = np.zeros((len(self.t_rads), len(self.atom_data.lines)))
self.montecarlo_virtual_luminosity = np.zeros_like(self.spectrum.frequency.value)
montecarlo_nu, montecarlo_energies, self.j_estimators, self.nubar_estimators, \
last_line_interaction_in_id, last_line_interaction_out_id, \
self.last_interaction_type, self.last_line_interaction_shell_id = \
montecarlo.montecarlo_radial1d(self,
virtual_packet_flag=no_of_virtual_packets)
if np.sum(montecarlo_energies < 0) == len(montecarlo_energies):
logger.critical("No r-packet escaped through the outer boundary.")
self.montecarlo_nu = montecarlo_nu * u.Hz
self.montecarlo_luminosity = montecarlo_energies * 1 * u.erg / self.time_of_simulation
montecarlo_reabsorbed_luminosity = -np.histogram(self.montecarlo_nu.value[self.montecarlo_luminosity.value < 0],
weights=self.montecarlo_luminosity.value[self.montecarlo_luminosity.value < 0],
bins=self.tardis_config.spectrum.frequency.value)[0] \
* self.montecarlo_luminosity.unit
montecarlo_emitted_luminosity = np.histogram(self.montecarlo_nu.value[self.montecarlo_luminosity.value >= 0],
weights=self.montecarlo_luminosity.value[self.montecarlo_luminosity.value >= 0],
bins=self.tardis_config.spectrum.frequency.value)[0] \
* self.montecarlo_luminosity.unit
self.spectrum.update_luminosity(montecarlo_emitted_luminosity)
self.spectrum_reabsorbed.update_luminosity(montecarlo_reabsorbed_luminosity)
if no_of_virtual_packets > 0:
self.montecarlo_virtual_luminosity = self.montecarlo_virtual_luminosity \
* 1 * u.erg / self.time_of_simulation
self.spectrum_virtual.update_luminosity(self.montecarlo_virtual_luminosity)
self.last_line_interaction_in_id = self.atom_data.lines_index.index.values[last_line_interaction_in_id]
self.last_line_interaction_in_id = self.last_line_interaction_in_id[last_line_interaction_in_id != -1]
self.last_line_interaction_out_id = self.atom_data.lines_index.index.values[last_line_interaction_out_id]
self.last_line_interaction_out_id = self.last_line_interaction_out_id[last_line_interaction_out_id != -1]
self.last_line_interaction_angstrom = self.montecarlo_nu[last_line_interaction_in_id != -1].to('angstrom',
u.spectral())
self.iterations_executed += 1
self.iterations_remaining -= 1
if self.gui is not None:
self.gui.update_data(self)
self.gui.show()
def save_spectra(self, fname):
self.spectrum.to_ascii(fname)
self.spectrum_virtual.to_ascii('virtual_' + fname)
def to_hdf5(self, buffer_or_fname, path='', close_h5=True):
"""
This allows the model to be written to an HDF5 file for later analysis. Currently, the saved properties
        are specified hard-coded in include_from_model_in_hdf5. This is a dict where the key corresponds to the
        name of the property and the value describes the type. If the value is None the property can be dumped
        to hdf via its attribute to_hdf or by converting it to a pd.DataFrame. For more complex properties,
        which cannot simply be dumped to an hdf file, the dict can contain a function which is called with
        the parameters key, path, and hdf_store. This function should then dump the data to the given
hdf_store object. To dump properties of sub-properties of the model, you can use a dict as value.
This dict is then treated in the same way as described above.
Parameters
----------
buffer_or_fname: buffer or ~str
buffer or filename for HDF5 file (see pandas.HDFStore for description)
path: ~str, optional
path in the HDF5 file
close_h5: ~bool
close the HDF5 file or not.
"""
# Functions to save properties of the model without to_hdf attribute and no simple conversion to a pd.DataFrame.
        # These functions are always called with the parameters key, path, and hdf_store.
def _save_luminosity_density(key, path, hdf_store):
luminosity_density = pd.DataFrame.from_dict(dict(wave=self.spectrum.wavelength.value,
flux=self.spectrum.luminosity_density_lambda.value))
luminosity_density.to_hdf(hdf_store, os.path.join(path, key))
def _save_spectrum_virtual(key, path, hdf_store):
if self.spectrum_virtual.luminosity_density_lambda is not None:
luminosity_density_virtual = pd.DataFrame.from_dict(dict(wave=self.spectrum_virtual.wavelength.value,
flux=self.spectrum_virtual.luminosity_density_lambda.value))
luminosity_density_virtual.to_hdf(hdf_store, os.path.join(path, key))
def _save_configuration_dict(key, path, hdf_store):
configuration_dict = dict(t_inner=self.t_inner.value)
configuration_dict_path = os.path.join(path, 'configuration')
pd.Series(configuration_dict).to_hdf(hdf_store, configuration_dict_path)
include_from_plasma_ = {'level_populations': None, 'ion_populations': None, 'tau_sobolevs': None,
'electron_densities': None,
't_rads': None, 'ws': None}
include_from_model_in_hdf5 = {'plasma_array': include_from_plasma_, 'j_blues': None,
'last_line_interaction_in_id': None,
'last_line_interaction_out_id': None,
'last_line_interaction_shell_id': None, 'montecarlo_nu': None,
'luminosity_density': _save_luminosity_density,
'luminosity_density_virtual': _save_spectrum_virtual,
'configuration_dict': _save_configuration_dict,
'last_line_interaction_angstrom': None}
if isinstance(buffer_or_fname, basestring):
hdf_store = pd.HDFStore(buffer_or_fname)
elif isinstance(buffer_or_fname, pd.HDFStore):
hdf_store = buffer_or_fname
else:
raise IOError('Please specify either a filename or an HDFStore')
logger.info('Writing to path %s', path)
def _get_hdf5_path(path, property_name):
return os.path.join(path, property_name)
def _to_smallest_pandas(object):
try:
return pd.Series(object)
except Exception:
return pd.DataFrame(object)
def _save_model_property(object, property_name, path, hdf_store):
property_path = _get_hdf5_path(path, property_name)
try:
object.to_hdf(hdf_store, property_path)
except AttributeError:
_to_smallest_pandas(object).to_hdf(hdf_store, property_path)
for key in include_from_model_in_hdf5:
if include_from_model_in_hdf5[key] is None:
_save_model_property(getattr(self, key), key, path, hdf_store)
elif callable(include_from_model_in_hdf5[key]):
include_from_model_in_hdf5[key](key, path, hdf_store)
else:
try:
for subkey in include_from_model_in_hdf5[key]:
if include_from_model_in_hdf5[key][subkey] is None:
_save_model_property(getattr(getattr(self, key), subkey), subkey, os.path.join(path, key),
hdf_store)
elif callable(include_from_model_in_hdf5[key][subkey]):
include_from_model_in_hdf5[key][subkey](subkey, os.path.join(path, key), hdf_store)
else:
logger.critical('Can not save %s', str(os.path.join(path, key, subkey)))
except:
logger.critical('An error occurred while dumping %s to HDF.', str(os.path.join(path, key)))
hdf_store.flush()
if close_h5:
hdf_store.close()
else:
return hdf_store
class TARDISSpectrum(object):
"""
TARDIS Spectrum object
"""
def __init__(self, frequency, distance=None):
self._frequency = frequency
self.wavelength = self.frequency.to('angstrom', u.spectral())
self.distance = distance
self.delta_frequency = frequency[1] - frequency[0]
self._flux_nu = np.zeros_like(frequency.value) * u.Unit('erg / (s Hz cm^2)')
self._flux_lambda = np.zeros_like(frequency.value) * u.Unit('erg / (s Angstrom cm^2)')
self.luminosity_density_nu = np.zeros_like(self.frequency) * u.Unit('erg / (s Hz)')
self.luminosity_density_lambda = np.zeros_like(self.frequency) * u.Unit('erg / (s Angstrom)')
@property
def frequency(self):
return self._frequency[:-1]
@property
def flux_nu(self):
if self.distance is None:
raise AttributeError('supernova distance not supplied - flux calculation impossible')
else:
return self._flux_nu
@property
def flux_lambda(self):
if self.distance is None:
raise AttributeError('supernova distance not supplied - flux calculation impossible')
return self._flux_lambda
def update_luminosity(self, spectrum_luminosity):
self.luminosity_density_nu = (spectrum_luminosity / self.delta_frequency).to('erg / (s Hz)')
self.luminosity_density_lambda = self.f_nu_to_f_lambda(self.luminosity_density_nu.value) \
* u.Unit('erg / (s Angstrom)')
if self.distance is not None:
self._flux_nu = (self.luminosity_density_nu / (4 * np.pi * self.distance.to('cm')**2))
self._flux_lambda = self.f_nu_to_f_lambda(self.flux_nu.value) * u.Unit('erg / (s Angstrom cm^2)')
def f_nu_to_f_lambda(self, f_nu):
return f_nu * self.frequency.value**2 / constants.c.cgs.value / 1e8
def plot(self, ax, mode='wavelength'):
if mode == 'wavelength':
ax.plot(self.wavelength.value, self.flux_lambda.value)
ax.set_xlabel('Wavelength [%s]' % self.wavelength.unit._repr_latex_())
ax.set_ylabel('Flux [%s]' % self.flux_lambda.unit._repr_latex_())
def to_ascii(self, fname, mode='luminosity_density'):
if mode == 'luminosity_density':
np.savetxt(fname, zip(self.wavelength.value, self.luminosity_density_lambda.value))
elif mode == 'flux':
np.savetxt(fname, zip(self.wavelength.value, self.flux_lambda.value))
else:
raise NotImplementedError('only mode "luminosity_density" and "flux" are implemented')
| bsd-3-clause |
btabibian/scikit-learn | sklearn/utils/tests/test_class_weight.py | 55 | 9891 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
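# --- Worked example (editorial addition, not part of scikit-learn) ---
# With class_weight="balanced" the weight of class c is
# n_samples / (n_classes * count_c), so for y = [2, 2, 2, 3, 3, 4]
# (counts 3, 2, 1) the weights are [6/9, 6/6, 6/3] = [0.667, 1.0, 2.0];
# their dot product with the counts recovers n_samples = 6, which is what the
# assertions above verify.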
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Fix exception in error message formatting when missing label is a string
# https://github.com/scikit-learn/scikit-learn/issues/8312
assert_raise_message(ValueError,
'Class label label_not_present not present',
compute_class_weight,
{'label_not_present': 1.}, classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
    # When the user specifies class weights, compute_class_weight should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_balanced_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_balanced_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777,
0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "balanced" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
marcharper/python-ternary | readme_images/heatmap_styles.py | 2 | 2474 | from __future__ import division
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import ternary
SQRT3OVER2 = np.sqrt(3) / 2
def project(p):
# project using the same transformation that was used for the triangles
a, b, c = p
x = a/2 + b
y = SQRT3OVER2 * a
return (x, y)
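# Quick sanity check of the projection above (illustrative only, not used
# below): the corners of the simplex land on an equilateral triangle of side 1.
#     project((1, 0, 0)) -> (0.5, SQRT3OVER2)   # top corner
#     project((0, 1, 0)) -> (1.0, 0.0)          # right corner
#     project((0, 0, 1)) -> (0.0, 0.0)          # left corner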
def matplotlib_plot(scale, cmap, filename=None):
points = list(ternary.helpers.simplex_iterator(scale))
xs, ys = zip(*map(project, points))
values = range(len(points))
f, axes = plt.subplots(1,3, figsize=(8.5, 4.5))
styles = ['triangular', 'dual-triangular', 'hexagonal']
ticks_list = [range(scale + 1), range(scale + 2), range(scale + 1)]
shift = True
for ax, style, ticks in zip(axes, styles, ticks_list):
ax.set_aspect('equal')
ax.set_title(style)
ternary.heatmap(dict(zip(points, values)),
scale=scale, ax=ax,
cmap=cmap, vmax=len(points) + 1,
style=style, colorbar=False)
if style == 'dual-triangular' and shift:
xvals = np.array(xs) + .5
yvals = np.array(ys) + 1/3
else:
xvals = xs
yvals = ys
ax.scatter(xvals, yvals, s=150, c='c', zorder=3)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
for x, y, value in zip(xvals, yvals, values):
ax.text(x, y, str(value),
fontsize=8,
horizontalalignment='center',
verticalalignment='center')
# Colorbar
f.tight_layout()
cbax = f.add_axes([0.025, 0.1, 0.95, 0.10])
norm = mpl.colors.Normalize(vmin=0, vmax=len(points))
ticks = np.linspace(0, len(points), num=len(points) + 1)
cb1 = mpl.colorbar.ColorbarBase(cbax, cmap=cmap,
norm=norm,
orientation='horizontal')
cb1.set_ticks(ticks)
if filename is not None:
plt.savefig(filename)
return ax
if __name__ == '__main__':
import subprocess
scale = 3
cmaps = [plt.cm.gray, plt.cm.cubehelix]
basename = 'heatmap_styles_{}.pdf'
filenames = [basename.format(cmap.name) for cmap in cmaps]
cmd = 'convert -density 300 -trim {} -quality 100 {}'
for cmap, pdf in zip(cmaps, filenames):
png = pdf[:-3] + 'png'
matplotlib_plot(scale, cmap, filename=pdf)
subprocess.call(cmd.format(pdf, png), shell=True)
| mit |
Titan-C/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
JasonKessler/scattertext | scattertext/CorpusDF.py | 1 | 2761 | import pandas as pd
from scattertext.DataFrameCorpus import DataFrameCorpus
class CorpusDF(DataFrameCorpus):
def __init__(self,
df,
X,
mX,
y,
text_col,
term_idx_store,
category_idx_store,
metadata_idx_store,
unigram_frequency_path=None):
'''
Parameters
----------
X : csr_matrix
term document matrix
mX : csr_matrix
metadata-document matrix
y : np.array
category index array
term_idx_store : IndexStore
Term indices
category_idx_store : IndexStore
Catgory indices
metadata_idx_store : IndexStore
Document metadata indices
text_col: np.array or pd.Series
Raw texts
unigram_frequency_path : str or None
Path to term frequency file.
'''
self._text_col = text_col
DataFrameCorpus.__init__(self,
X,
mX,
y,
term_idx_store,
category_idx_store,
metadata_idx_store,
df[text_col],
df,
unigram_frequency_path)
def get_texts(self):
'''
Returns
-------
pd.Series, all raw documents
'''
return self._df[self._text_col]
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None,
new_df=None):
X, mX, y = self._update_X_mX_y(new_X, new_mX, new_y, new_y_mask)
return CorpusDF(
df=self._apply_mask_to_df(new_y_mask, new_df),
X=X,
mX=mX,
y=y,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
text_col=self._text_col,
unigram_frequency_path=self._unigram_frequency_path
)
| apache-2.0 |
thorwhalen/ut | dacc/mong/com.py | 1 | 4599 | __author__ = 'thorwhalen'
import pymongo as mg
import pandas as pd
from ut.util.imports.ipython_utils import PPR
from ut.daf.to import dict_list_of_rows
from ut.daf.manip import rm_cols_if_present
from ut.daf.ch import to_utf8
def mdb_info(mg_element=None):
    if mg_element is None:
return mdb_info(mg.MongoClient())
else:
if isinstance(mg_element, mg.MongoClient):
return {dbname: mdb_info(getattr(mg_element, dbname)) for dbname in mg_element.database_names()}
elif isinstance(mg_element, mg.database.Database):
return {coll_name: getattr(mg_element, coll_name).count() for coll_name in mg_element.collection_names()}
def get_db(db_name='test-database'):
import pymongo as mg
connection = mg.MongoClient()
db = connection[db_name]
return db
class MongoStruct:
def __init__(self, obj=None):
"""
MongoStruct() assigns MongoClient() to .obj
MongoStruct(mongo_client) assigns the mongo_client to .obj
MongoStruct(database) assigns the database to .obj
        MongoStruct(database_name) assigns the database of that name (looked up on a local MongoClient()) to .obj
"""
self.obj = obj or mg.MongoClient()
# if isinstance(self.obj, mg.MongoClient):
# for dbname in self.obj.database_names():
# setattr(self, dbname, MongoStruct(self.obj[dbname]))
# elif isinstance(self.obj, mg.database.Database):
# for coll_name in self.obj.collection_names():
# setattr(self, coll_name, self.obj[coll_name])
if isinstance(self.obj, str):
self.obj = getattr(mg.MongoClient(), self.obj)
self.refresh()
def __getitem__(self, val):
return self.__dict__[val]
def __str__(self):
return '{}'.format(str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.items())))
def __repr__(self):
return PPR.format_str(mdb_info(self.obj))
def refresh(self):
if isinstance(self.obj, mg.MongoClient):
for dbname in self.obj.database_names():
setattr(self, dbname, MongoStruct(self.obj[dbname]))
elif isinstance(self.obj, mg.database.Database):
for coll_name in self.obj.collection_names():
setattr(self, coll_name, self.obj[coll_name])
# elif isinstance(self.obj, mg.collection.Collection):
# for coll_name in self.obj.collection_names():
# setattr(self, coll_name, self.obj[coll_name])
def create_collection_ignore_if_exists(self, collection_name):
if not isinstance(self.obj, mg.database.Database):
raise ValueError("self.obj must be a database to do that!")
try:
self.obj.create_collection(collection_name)
self.refresh()
except Exception:
pass
def recreate_collection(self, collection_name):
if not isinstance(self.obj, mg.database.Database):
raise ValueError("self.obj must be a database to do that!")
try:
self.obj.drop_collection(collection_name)
except Exception:
pass
try:
self.obj.create_collection(collection_name)
except Exception:
pass
self.refresh()
@staticmethod
def get_dict_with_key_from_collection(key, collection):
try:
return collection.find_one({key: {'$exists': True}}).get(key)
except AttributeError:
return None
@staticmethod
def insert_df(df, collection, delete_previous_contents=False, dropna=False, **kwargs):
"""
        Insert the rows of the dataframe df (as dicts) into the given collection.
        If you want to do it given a mongo_db and a collection_name:
            MongoStruct.insert_df(df, getattr(mongo_db, collection_name), **kwargs)
        If you want to do it given (a client and) a db name and a collection name:
            MongoStruct.insert_df(df, getattr(getattr(client, db_name), collection_name), **kwargs)
"""
if delete_previous_contents:
collection_name = collection.name
mother_db = collection.database
mother_db.drop_collection(collection_name)
mother_db.create_collection(collection_name)
kwargs = dict(kwargs, **{'w': 0}) # default is w=0 (no replicas)
if kwargs.get('to_utf8'):
to_utf8(df, columns=df.columns, inplace=True)
collection.insert(dict_list_of_rows(df, dropna=dropna), **kwargs)
@staticmethod
def to_df(cursor):
return rm_cols_if_present(pd.DataFrame(list(cursor)), ['_id'])
| mit |
EmreAtes/spack | var/spack/repos/builtin/packages/py-pybedtools/package.py | 5 | 2071 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPybedtools(PythonPackage):
"""pybedtools wraps and extends BEDTools and offers
feature-level manipulations from within Python."""
homepage = "http://daler.github.io/pybedtools"
url = "https://pypi.io/packages/source/p/pybedtools/pybedtools-0.7.10.tar.gz"
version('0.7.10', 'f003c67e22c48b77f070538368ece70c')
version('0.6.9', 'b7df049036422d8c6951412a90e83dca')
depends_on('py-setuptools', type='build')
depends_on('bedtools2', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'), when='@0.7.0:')
depends_on('[email protected]', type=('build', 'run'), when='@0.6.9')
depends_on('py-six', type=('build', 'run'))
| lgpl-2.1 |
darshanthaker/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt.py | 69 | 16846 | from __future__ import division
import math
import os
import sys
import matplotlib
from matplotlib import verbose
from matplotlib.cbook import is_string_like, onetrue
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.widgets import SubplotTool
try:
import qt
except ImportError:
raise ImportError("Qt backend requires pyqt to be installed.")
backend_version = "0.9.1"
def fn_name(): return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE : qt.Qt.PointingHandCursor,
cursors.HAND : qt.Qt.WaitCursor,
cursors.POINTER : qt.Qt.ArrowCursor,
cursors.SELECT_REGION : qt.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one
"""
if qt.QApplication.startingUp():
if DEBUG: print "Starting up QApplication"
global qApp
qApp = qt.QApplication( [" "] )
qt.QObject.connect( qApp, qt.SIGNAL( "lastWindowClosed()" ),
qApp, qt.SLOT( "quit()" ) )
#remember that matplotlib created the qApp - will be used by show()
_create_qApp.qAppCreatedHere = True
_create_qApp.qAppCreatedHere = False
def show():
"""
Show all the figures and enter the qt main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if DEBUG: print 'Inside show'
figManager = Gcf.get_active()
if figManager != None:
figManager.canvas.draw()
if _create_qApp.qAppCreatedHere:
qt.qApp.exec_loop()
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQT( thisFig )
manager = FigureManagerQT( canvas, num )
return manager
class FigureCanvasQT( qt.QWidget, FigureCanvasBase ):
keyvald = { qt.Qt.Key_Control : 'control',
qt.Qt.Key_Shift : 'shift',
qt.Qt.Key_Alt : 'alt',
}
# left 1, middle 2, right 3
buttond = {1:1, 2:3, 4:2}
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQt: ', figure
_create_qApp()
qt.QWidget.__init__( self, None, "QWidget figure" )
FigureCanvasBase.__init__( self, figure )
self.figure = figure
self.setMouseTracking( True )
w,h = self.get_width_height()
self.resize( w, h )
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, event)
def leaveEvent(self, event):
FigureCanvasBase.leave_notify_event(self, event)
def mousePressEvent( self, event ):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond[event.button()]
FigureCanvasBase.button_press_event( self, x, y, button )
if DEBUG: print 'button pressed:', event.button()
def mouseMoveEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event( self, x, y )
if DEBUG: print 'mouse move'
def mouseReleaseEvent( self, event ):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond[event.button()]
FigureCanvasBase.button_release_event( self, x, y, button )
if DEBUG: print 'button released'
def keyPressEvent( self, event ):
key = self._get_key( event )
FigureCanvasBase.key_press_event( self, key )
if DEBUG: print 'key press', key
def keyReleaseEvent( self, event ):
key = self._get_key(event)
FigureCanvasBase.key_release_event( self, key )
if DEBUG: print 'key release', key
def resizeEvent( self, event ):
if DEBUG: print 'resize (%d x %d)' % (event.size().width(), event.size().height())
qt.QWidget.resizeEvent( self, event )
w = event.size().width()
h = event.size().height()
if DEBUG: print "FigureCanvasQt.resizeEvent(", w, ",", h, ")"
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches( winch, hinch )
self.draw()
def resize( self, w, h ):
# Pass through to Qt to resize the widget.
qt.QWidget.resize( self, w, h )
# Resize the figure by converting pixels to inches.
pixelPerInch = self.figure.dpi
wInch = w / pixelPerInch
hInch = h / pixelPerInch
self.figure.set_size_inches( wInch, hInch )
# Redraw everything.
self.draw()
def sizeHint( self ):
w, h = self.get_width_height()
return qt.QSize( w, h )
    def minimumSizeHint( self ):
return qt.QSize( 10, 10 )
def _get_key( self, event ):
if event.key() < 256:
key = event.text().latin1()
        elif event.key() in self.keyvald:
key = self.keyvald[ event.key() ]
else:
key = None
return key
def flush_events(self):
qt.qApp.processEvents()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerQT( FigureManagerBase ):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__( self, canvas, num ):
if DEBUG: print 'FigureManagerQT.%s' % fn_name()
FigureManagerBase.__init__( self, canvas, num )
self.canvas = canvas
self.window = qt.QMainWindow( None, None, qt.Qt.WDestructiveClose )
self.window.closeEvent = self._widgetCloseEvent
centralWidget = qt.QWidget( self.window )
self.canvas.reparent( centralWidget, qt.QPoint( 0, 0 ) )
# Give the keyboard focus to the figure instead of the manager
self.canvas.setFocusPolicy( qt.QWidget.ClickFocus )
self.canvas.setFocus()
self.window.setCaption( "Figure %d" % num )
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, centralWidget)
# Use a vertical layout for the plot and the toolbar. Set the
# stretch to all be in the plot so the toolbar doesn't resize.
self.layout = qt.QVBoxLayout( centralWidget )
self.layout.addWidget( self.canvas, 1 )
if self.toolbar:
self.layout.addWidget( self.toolbar, 0 )
self.window.setCentralWidget( centralWidget )
# Reset the window height so the canvas will be the right
# size. This ALMOST works right. The first issue is that the
# height w/ a toolbar seems to be off by just a little bit (so
# we add 4 pixels). The second is that the total width/height
# is slightly smaller that we actually want. It seems like
# the border of the window is being included in the size but
# AFAIK there is no way to get that size.
w = self.canvas.width()
h = self.canvas.height()
if self.toolbar:
h += self.toolbar.height() + 4
self.window.resize( w, h )
if matplotlib.is_interactive():
self.window.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
def notify_axes_change( fig ):
# This will be called whenever the current axes is changed
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver( notify_axes_change )
def _widgetclosed( self ):
if self.window._destroying: return
self.window._destroying = True
Gcf.destroy(self.num)
def _widgetCloseEvent( self, event ):
self._widgetclosed()
qt.QWidget.closeEvent( self.window, event )
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
print "Classic toolbar is not yet supported"
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height)
def destroy( self, *args ):
if self.window._destroying: return
self.window._destroying = True
if self.toolbar: self.toolbar.destroy()
if DEBUG: print "destroy figure manager"
self.window.close(True)
def set_window_title(self, title):
self.window.setCaption(title)
class NavigationToolbar2QT( NavigationToolbar2, qt.QWidget ):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.ppm', 'home'),
('Back', 'Back to previous view','back.ppm', 'back'),
('Forward', 'Forward to next view','forward.ppm', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.ppm', 'pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.ppm', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.ppm', 'save_figure'),
)
def __init__( self, canvas, parent ):
self.canvas = canvas
self.buttons = {}
qt.QWidget.__init__( self, parent )
# Layout toolbar buttons horizontally.
self.layout = qt.QHBoxLayout( self )
self.layout.setMargin( 2 )
NavigationToolbar2.__init__( self, canvas )
def _init_toolbar( self ):
basedir = os.path.join(matplotlib.rcParams[ 'datapath' ],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text == None:
self.layout.addSpacing( 8 )
continue
fname = os.path.join( basedir, image_file )
image = qt.QPixmap()
image.load( fname )
button = qt.QPushButton( qt.QIconSet( image ), "", self )
qt.QToolTip.add( button, tooltip_text )
self.buttons[ text ] = button
# The automatic layout doesn't look that good - it's too close
# to the images so add a margin around it.
margin = 4
button.setFixedSize( image.width()+margin, image.height()+margin )
qt.QObject.connect( button, qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
self.layout.addWidget( button )
self.buttons[ 'Pan' ].setToggleButton( True )
self.buttons[ 'Zoom' ].setToggleButton( True )
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
self.locLabel = qt.QLabel( "", self )
self.locLabel.setAlignment( qt.Qt.AlignRight | qt.Qt.AlignVCenter )
self.locLabel.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Ignored,
qt.QSizePolicy.Ignored))
self.layout.addWidget( self.locLabel, 1 )
# reference holder for subplots_adjust window
self.adj_window = None
def destroy( self ):
for text, tooltip_text, image_file, callback in self.toolitems:
if text is not None:
qt.QObject.disconnect( self.buttons[ text ],
qt.SIGNAL( 'clicked()' ),
getattr( self, callback ) )
def pan( self, *args ):
self.buttons[ 'Zoom' ].setOn( False )
NavigationToolbar2.pan( self, *args )
def zoom( self, *args ):
self.buttons[ 'Pan' ].setOn( False )
NavigationToolbar2.zoom( self, *args )
def dynamic_update( self ):
self.canvas.draw()
def set_message( self, s ):
self.locLabel.setText( s )
def set_cursor( self, cursor ):
if DEBUG: print 'Set cursor' , cursor
qt.QApplication.restoreOverrideCursor()
qt.QApplication.setOverrideCursor( qt.QCursor( cursord[cursor] ) )
def draw_rubberband( self, event, x0, y0, x1, y1 ):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle( rect )
def configure_subplots(self):
self.adj_window = qt.QMainWindow(None, None, qt.Qt.WDestructiveClose)
win = self.adj_window
win.setCaption("Subplot Configuration Tool")
toolfig = Figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
canvas = self._get_canvas(toolfig)
tool = SubplotTool(self.canvas.figure, toolfig)
centralWidget = qt.QWidget(win)
canvas.reparent(centralWidget, qt.QPoint(0, 0))
win.setCentralWidget(centralWidget)
layout = qt.QVBoxLayout(centralWidget)
layout.addWidget(canvas, 1)
win.resize(w, h)
canvas.setFocus()
win.show()
def _get_canvas(self, fig):
return FigureCanvasQT(fig)
def save_figure( self ):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
start = "image." + default_filetype
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname = qt.QFileDialog.getSaveFileName(
start, filters, self, "Save image", "Choose a filename to save to",
selectedFilter)
if fname:
try:
self.canvas.print_figure( unicode(fname) )
except Exception, e:
qt.QMessageBox.critical(
self, "Error saving file", str(e),
qt.QMessageBox.Ok, qt.QMessageBox.NoButton)
def set_history_buttons( self ):
canBackward = ( self._views._pos > 0 )
canForward = ( self._views._pos < len( self._views._elements ) - 1 )
self.buttons[ 'Back' ].setEnabled( canBackward )
self.buttons[ 'Forward' ].setEnabled( canForward )
# set icon used when windows are minimized
try:
# TODO: This is badly broken
qt.window_set_default_icon_from_file (
os.path.join( matplotlib.rcParams['datapath'], 'images', 'matplotlib.svg' ) )
except:
verbose.report( 'Could not load matplotlib icon: %s' % sys.exc_info()[1] )
def error_msg_qt( msg, parent=None ):
if not is_string_like( msg ):
msg = ','.join( map( str,msg ) )
qt.QMessageBox.warning( None, "Matplotlib", msg, qt.QMessageBox.Ok )
def exception_handler( type, value, tb ):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename != None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror != None:
msg += value.strerror
else:
msg += str(value)
if len( msg ) : error_msg_qt( msg )
FigureManager = FigureManagerQT
| agpl-3.0 |
harisbal/pandas | pandas/tests/indexing/test_callable.py | 4 | 8722 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestIndexingCallable(object):
def test_frame_loc_ix_callable(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
# iloc cannot use boolean Series (see GH3635)
# return bool indexer
res = df.loc[lambda x: x.A > 2]
tm.assert_frame_equal(res, df.loc[df.A > 2])
res = df.loc[lambda x: x.A > 2]
tm.assert_frame_equal(res, df.loc[df.A > 2])
res = df.loc[lambda x: x.A > 2, ]
tm.assert_frame_equal(res, df.loc[df.A > 2, ])
res = df.loc[lambda x: x.A > 2, ]
tm.assert_frame_equal(res, df.loc[df.A > 2, ])
res = df.loc[lambda x: x.B == 'b', :]
tm.assert_frame_equal(res, df.loc[df.B == 'b', :])
res = df.loc[lambda x: x.B == 'b', :]
tm.assert_frame_equal(res, df.loc[df.B == 'b', :])
res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B']
tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B']
tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])
res = df.loc[lambda x: x.A > 2, lambda x: 'B']
tm.assert_series_equal(res, df.loc[df.A > 2, 'B'])
res = df.loc[lambda x: x.A > 2, lambda x: 'B']
tm.assert_series_equal(res, df.loc[df.A > 2, 'B'])
res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']])
res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']])
# scalar
res = df.loc[lambda x: 1, lambda x: 'A']
assert res == df.loc[1, 'A']
res = df.loc[lambda x: 1, lambda x: 'A']
assert res == df.loc[1, 'A']
def test_frame_loc_ix_callable_mixture(self):
# GH 11485
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'),
'C': [1, 2, 3, 4]})
res = df.loc[lambda x: x.A > 2, ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[lambda x: x.A > 2, ['A', 'B']]
tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']])
res = df.loc[[2, 3], lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']])
res = df.loc[[2, 3], lambda x: ['A', 'B']]
tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']])
res = df.loc[3, lambda x: ['A', 'B']]
tm.assert_series_equal(res, df.loc[3, ['A', 'B']])
res = df.loc[3, lambda x: ['A', 'B']]
tm.assert_series_equal(res, df.loc[3, ['A', 'B']])
def test_frame_loc_callable(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return label
res = df.loc[lambda x: ['A', 'C']]
tm.assert_frame_equal(res, df.loc[['A', 'C']])
res = df.loc[lambda x: ['A', 'C'], ]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ])
res = df.loc[lambda x: ['A', 'C'], :]
tm.assert_frame_equal(res, df.loc[['A', 'C'], :])
res = df.loc[lambda x: ['A', 'C'], lambda x: 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[lambda x: ['A', 'C'], lambda x: ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
# mixture
res = df.loc[['A', 'C'], lambda x: 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[['A', 'C'], lambda x: ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
res = df.loc[lambda x: ['A', 'C'], 'X']
tm.assert_series_equal(res, df.loc[['A', 'C'], 'X'])
res = df.loc[lambda x: ['A', 'C'], ['X']]
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']])
def test_frame_loc_callable_setitem(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return label
res = df.copy()
res.loc[lambda x: ['A', 'C']] = -20
exp = df.copy()
exp.loc[['A', 'C']] = -20
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], :] = 20
exp = df.copy()
exp.loc[['A', 'C'], :] = 20
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], lambda x: 'X'] = -1
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], lambda x: ['X']] = [5, 10]
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = [5, 10]
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.loc[['A', 'C'], lambda x: 'X'] = np.array([-1, -2])
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = np.array([-1, -2])
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[['A', 'C'], lambda x: ['X']] = 10
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], 'X'] = -2
exp = df.copy()
exp.loc[['A', 'C'], 'X'] = -2
tm.assert_frame_equal(res, exp)
res = df.copy()
res.loc[lambda x: ['A', 'C'], ['X']] = -4
exp = df.copy()
exp.loc[['A', 'C'], ['X']] = -4
tm.assert_frame_equal(res, exp)
def test_frame_iloc_callable(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return location
res = df.iloc[lambda x: [1, 3]]
tm.assert_frame_equal(res, df.iloc[[1, 3]])
res = df.iloc[lambda x: [1, 3], :]
tm.assert_frame_equal(res, df.iloc[[1, 3], :])
res = df.iloc[lambda x: [1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
# mixture
res = df.iloc[[1, 3], lambda x: 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[[1, 3], lambda x: [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
res = df.iloc[lambda x: [1, 3], 0]
tm.assert_series_equal(res, df.iloc[[1, 3], 0])
res = df.iloc[lambda x: [1, 3], [0]]
tm.assert_frame_equal(res, df.iloc[[1, 3], [0]])
def test_frame_iloc_callable_setitem(self):
# GH 11485
df = pd.DataFrame({'X': [1, 2, 3, 4],
'Y': list('aabb')},
index=list('ABCD'))
# return location
res = df.copy()
res.iloc[lambda x: [1, 3]] = 0
exp = df.copy()
exp.iloc[[1, 3]] = 0
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], :] = -1
exp = df.copy()
exp.iloc[[1, 3], :] = -1
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: 0] = 5
exp = df.copy()
exp.iloc[[1, 3], 0] = 5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], lambda x: [0]] = 25
exp = df.copy()
exp.iloc[[1, 3], [0]] = 25
tm.assert_frame_equal(res, exp)
# mixture
res = df.copy()
res.iloc[[1, 3], lambda x: 0] = -3
exp = df.copy()
exp.iloc[[1, 3], 0] = -3
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[[1, 3], lambda x: [0]] = -5
exp = df.copy()
exp.iloc[[1, 3], [0]] = -5
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], 0] = 10
exp = df.copy()
exp.iloc[[1, 3], 0] = 10
tm.assert_frame_equal(res, exp)
res = df.copy()
res.iloc[lambda x: [1, 3], [0]] = [-5, -5]
exp = df.copy()
exp.iloc[[1, 3], [0]] = [-5, -5]
tm.assert_frame_equal(res, exp)
| bsd-3-clause |
rvraghav93/scikit-learn | sklearn/model_selection/tests/test_validation.py | 3 | 43270 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
"""Dummy classifier that disallows repeated calls of fit method"""
def fit(self, X_subset, y_subset):
assert_false(
hasattr(self, 'fit_called_'),
'fit is called the second time'
)
self.fit_called_ = True
return super(type(self), self).fit(X_subset, y_subset)
def predict(self, X):
raise NotImplementedError
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be more than 2d unless allow_nd is True')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_linear)
# test with callable
svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
score_callable = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_callable)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
    # 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(max_iter=1, tol=None,
shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
# Following test case was designed this way to verify the code
# changes made in pull request: #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(max_iter=5, tol=None,
shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_clone_estimator():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(1, 0, 10)
_, _ = validation_curve(
MockEstimatorWithSingleFitCallAllowed(), X, y,
param_name="param", param_range=param_range, cv=2
)
def test_validation_curve_cv_splits_consistency():
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=100, random_state=0)
scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
    # The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split` is
    # called anew for each parameter setting, the following should produce
    # identical results for param setting 1 and param setting 2, as both have
    # the same C value.
assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
2))
scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits, shuffle=True))
# For scores2, compare the 1st and 2nd parameter's scores
# (Since the C value for 1st two param setting is 0.1, they must be
# consistent unless the train test folds differ between the param settings)
assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
2))
scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits))
# OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
# Test alternative representations of y
predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y1)
predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y2)
predictions_ystr = cross_val_predict(est, X, y.astype('str'),
method=method, cv=kfold)
assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
est = GridSearchCV(LogisticRegression(random_state=42),
{'C': [0.1, 1]},
cv=2)
check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
expected_predictions_ = func(X[test])
# To avoid 2 dimensional indexing
exp_pred_test = np.zeros((len(test), classes))
        if method == 'decision_function' and len(est.classes_) == 2:
exp_pred_test[:, est.classes_[-1]] = expected_predictions_
else:
exp_pred_test[:, est.classes_] = expected_predictions_
expected_predictions[test] = exp_pred_test
return expected_predictions
def test_cross_val_predict_class_subset():
X = np.arange(8).reshape(4, 2)
y = np.array([0, 0, 1, 2])
classes = 3
kfold3 = KFold(n_splits=3)
kfold4 = KFold(n_splits=4)
le = LabelEncoder()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
# Test with n_splits=3
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
# Runs a naive loop (should be same as cross_val_predict):
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Test with n_splits=4
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold4)
expected_predictions = get_expected_predictions(X, y, kfold4, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Testing unordered labels
y = [1, 1, -4, 6]
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
y = le.fit_transform(y)
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
def test_permutation_test_score_pandas():
# check permutation_test_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
iris = load_iris()
X, y = iris.data, iris.target
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
permutation_test_score(clf, X_df, y_ser)
| bsd-3-clause |
ud3sh/coursework | deeplearning.ai/coursera-improving-neural-networks/week2/Optimization_methods_v1b.py | 1 | 41625 |
# coding: utf-8
# # Optimization Methods
#
# Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
#
# Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
# <img src="images/cost.jpg" style="width:650px;height:300px;">
# <caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
#
# **Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
#
# To get started, run the following code to import the libraries you will need.
# ### <font color='darkblue'> Updates to Assignment <font>
#
# #### If you were working on a previous version
# * The current notebook filename is version "Optimization_methods_v1b".
# * You can find your work in the file directory as version "Optimization methods".
# * To see the file directory, click on the Coursera logo at the top left of the notebook.
#
# #### List of Updates
# * op_utils is now opt_utils_v1a. Assertion statement in `initialize_parameters` is fixed.
# * opt_utils_v1a: `compute_cost` function now accumulates total cost of the batch without taking the average (average is taken for entire epoch instead).
# * In `model` function, the total cost per mini-batch is accumulated, and the average of the entire epoch is taken as the average cost. So the plot of the cost function over time is now a smooth downward curve instead of an oscillating curve.
# * Print statements used to check each function are reformatted, and the `expected output` is reformatted to match the format of the print statements (for easier visual comparisons).
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils_v1a import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils_v1a import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
get_ipython().magic('matplotlib inline')
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# ## 1 - Gradient Descent
#
# A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
#
# **Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
# $$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
# $$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
#
# where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[2]:
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * grads['dW' + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * grads['db' + str(l+1)]
### END CODE HERE ###
return parameters
# In[3]:
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 =\n" + str(parameters["W1"]))
print("b1 =\n" + str(parameters["b1"]))
print("W2 =\n" + str(parameters["W2"]))
print("b2 =\n" + str(parameters["b2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.63535156 -0.62320365 -0.53718766]
# [-1.07799357 0.85639907 -2.29470142]]
# b1 =
# [[ 1.74604067]
# [-0.75184921]]
# W2 =
# [[ 0.32171798 -0.25467393 1.46902454]
# [-2.05617317 -0.31554548 -0.3756023 ]
# [ 1.1404819 -1.09976462 -0.1612551 ]]
# b2 =
# [[-0.88020257]
# [ 0.02561572]
# [ 0.57539477]]
# ```
# A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
#
# - **(Batch) Gradient Descent**:
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# # Forward propagation
# a, caches = forward_propagation(X, parameters)
# # Compute cost.
# cost += compute_cost(a, Y)
# # Backward propagation.
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
#
# ```
#
# - **Stochastic Gradient Descent**:
#
# ```python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
# for j in range(0, m):
# # Forward propagation
# a, caches = forward_propagation(X[:,j], parameters)
# # Compute cost
# cost += compute_cost(a, Y[:,j])
# # Backward propagation
# grads = backward_propagation(a, caches, parameters)
# # Update parameters.
# parameters = update_parameters(parameters, grads)
# ```
#
# In Stochastic Gradient Descent, you use only 1 training example for each update of the parameters. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
#
# <img src="images/kiank_sgd.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
#
# **Note** also that implementing SGD requires 3 for-loops in total:
# 1. Over the number of iterations
# 2. Over the $m$ training examples
# 3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
#
# In practice, you'll often get faster results if you use neither the whole training set nor only a single training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples (see the sketch below).
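#
# - **Mini-Batch Gradient Descent** (a sketch for comparison only, using the same hypothetical helper names as the two loops above plus the `random_mini_batches` function you will implement in the next section):
#
# ``` python
# X = data_input
# Y = labels
# parameters = initialize_parameters(layers_dims)
# for i in range(0, num_iterations):
#     minibatches = random_mini_batches(X, Y, mini_batch_size)
#     for minibatch_X, minibatch_Y in minibatches:
#         # Forward propagation
#         a, caches = forward_propagation(minibatch_X, parameters)
#         # Compute cost
#         cost += compute_cost(a, minibatch_Y)
#         # Backward propagation
#         grads = backward_propagation(a, caches, parameters)
#         # Update parameters.
#         parameters = update_parameters(parameters, grads)
# ```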
#
# <img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
# <caption><center> <u> <font color='purple'> **Figure 2** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
#
# <font color='blue'>
# **What you should remember**:
# - The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
# - You have to tune a learning rate hyperparameter $\alpha$.
# - With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
# ## 2 - Mini-Batch Gradient descent
#
# Let's learn how to build mini-batches from the training set (X, Y).
#
# There are two steps:
# - **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, so that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
#
# <img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
#
# - **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
#
# <img src="images/kiank_partition.png" style="width:550px;height:300px;">
#
# **Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
# ```python
# first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
# second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
# ...
# ```
#
# Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64` then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$).
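#
# As a quick sanity check of this arithmetic (the numbers are inferred from the expected shapes printed below, i.e. 64 + 64 + 20 = 148 examples): with $m = 148$ and `mini_batch_size = 64` there are $\lfloor \frac{148}{64} \rfloor = 2$ full mini-batches, and the final mini-batch contains $148 - 2 \times 64 = 20$ examples.
#
# ``` python
# num_complete_minibatches = math.floor(148 / 64)            # -> 2
# last_minibatch_size = 148 - 64 * num_complete_minibatches  # -> 20
# ```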
# In[4]:
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini-batches of size mini_batch_size in your partitioning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, mini_batch_size * k : mini_batch_size * (k + 1)]
mini_batch_Y = shuffled_Y[:, mini_batch_size * k : mini_batch_size * (k + 1)]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
        mini_batch_X = shuffled_X[:, mini_batch_size * num_complete_minibatches : m]
        mini_batch_Y = shuffled_Y[:, mini_batch_size * num_complete_minibatches : m]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
# In[5]:
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# **Expected Output**:
#
# <table style="width:50%">
# <tr>
# <td > **shape of the 1st mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 2nd mini_batch_X** </td>
# <td > (12288, 64) </td>
# </tr>
#
# <tr>
# <td > **shape of the 3rd mini_batch_X** </td>
# <td > (12288, 20) </td>
# </tr>
# <tr>
# <td > **shape of the 1st mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 2nd mini_batch_Y** </td>
# <td > (1, 64) </td>
# </tr>
# <tr>
# <td > **shape of the 3rd mini_batch_Y** </td>
# <td > (1, 20) </td>
# </tr>
# <tr>
# <td > **mini batch sanity check** </td>
# <td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
# </tr>
#
# </table>
# <font color='blue'>
# **What you should remember**:
# - Shuffling and Partitioning are the two steps required to build mini-batches
# - Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
# ## 3 - Momentum
#
# Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
#
# Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
#
# <img src="images/opt_momentum.png" style="width:400px;height:250px;">
# <caption><center> <u><font color='purple'>**Figure 3**</u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
#
#
# **Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
# for $l =1,...,L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# ```
# **Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
# In[8]:
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
v["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
### END CODE HERE ###
return v
# In[9]:
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] =\n" + str(v["dW1"]))
print("v[\"db1\"] =\n" + str(v["db1"]))
print("v[\"dW2\"] =\n" + str(v["dW2"]))
print("v[\"db2\"] =\n" + str(v["db2"]))
# **Expected Output**:
#
# ```
# v["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db1"] =
# [[ 0.]
# [ 0.]]
# v["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# ```
# **Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
#
# $$ \begin{cases}
# v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
# W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
# \end{cases}\tag{3}$$
#
# $$\begin{cases}
# v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
# b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
# \end{cases}\tag{4}$$
#
# where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
# In[10]:
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l+1)] = beta * v["dW" + str(l+1)] + (1 - beta)* grads["dW" + str(l+1)]
v["db" + str(l+1)] = beta * v["db" + str(l+1)] + (1 - beta)* grads["db" + str(l+1)]
# update parameters
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v["db" + str(l+1)]
### END CODE HERE ###
return parameters, v
# In[11]:
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = v" + str(v["db2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.62544598 -0.61290114 -0.52907334]
# [-1.07347112 0.86450677 -2.30085497]]
# b1 =
# [[ 1.74493465]
# [-0.76027113]]
# W2 =
# [[ 0.31930698 -0.24990073 1.4627996 ]
# [-2.05974396 -0.32173003 -0.38320915]
# [ 1.13444069 -1.0998786 -0.1713109 ]]
# b2 =
# [[-0.87809283]
# [ 0.04055394]
# [ 0.58207317]]
# v["dW1"] =
# [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]]
# v["db1"] =
# [[-0.01228902]
# [-0.09357694]]
# v["dW2"] =
# [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] = v[[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]
# ```
# **Note** that:
# - The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
# - If $\beta = 0$, then this just becomes standard gradient descent without momentum.
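#
# As an optional consistency check of the second point (not part of the graded exercise), you could verify on the test-case values above that $\beta = 0$ reproduces the plain gradient descent update:
#
# ``` python
# import copy
# p_gd = update_parameters_with_gd(copy.deepcopy(parameters), grads, learning_rate=0.01)
# p_mom, _ = update_parameters_with_momentum(copy.deepcopy(parameters), grads,
#                                            copy.deepcopy(v), beta=0.0, learning_rate=0.01)
# print(np.allclose(p_gd["W1"], p_mom["W1"]))  # expected: True
# ```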
#
# **How do you choose $\beta$?**
#
# - The larger the momentum $\beta$ is, the smoother the update, because more of the past gradients are taken into account. But if $\beta$ is too big, it could also smooth out the updates too much.
# - Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
# - Tuning the optimal $\beta$ for your model might require trying several values to see what works best in terms of reducing the value of the cost function $J$.
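#
# A common rule of thumb (standard exponentially weighted average intuition, not derived in this notebook) is that the velocity roughly averages the last $\frac{1}{1-\beta}$ gradients: the default $\beta = 0.9$ above averages about $\frac{1}{1-0.9} = 10$ past gradients, while $\beta = 0.999$ averages about $1000$ of them, which is why very large values can over-smooth the updates.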
# <font color='blue'>
# **What you should remember**:
# - Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
# - You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
# ## 4 - Adam
#
# Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
#
# **How does Adam work?**
# 1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
# 2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
# 3. It updates parameters in a direction based on combining information from "1" and "2".
#
# The update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
# v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
# s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
# s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
# \end{cases}$$
# where:
# - t counts the number of Adam update steps taken
# - L is the number of layers
# - $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
# - $\alpha$ is the learning rate
# - $\varepsilon$ is a very small number to avoid dividing by zero
#
# As usual, we will store all parameters in the `parameters` dictionary.
# **Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
#
# **Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
# for $l = 1, ..., L$:
# ```python
# v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
# s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
# s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
#
# ```
# In[12]:
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
v["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
s["dW" + str(l+1)] = np.zeros(parameters["W" + str(l+1)].shape)
s["db" + str(l+1)] = np.zeros(parameters["b" + str(l+1)].shape)
### END CODE HERE ###
return v, s
# In[13]:
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# **Expected Output**:
#
# ```
# v["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db1"] =
# [[ 0.]
# [ 0.]]
# v["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# v["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# s["dW1"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]]
# s["db1"] =
# [[ 0.]
# [ 0.]]
# s["dW2"] =
# [[ 0. 0. 0.]
# [ 0. 0. 0.]
# [ 0. 0. 0.]]
# s["db2"] =
# [[ 0.]
# [ 0.]
# [ 0.]]
# ```
# **Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
#
# $$\begin{cases}
# v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
# v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
# s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
# s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
# W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
# \end{cases}$$
#
#
# **Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
# In[18]:
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1)* grads["dW" + str(l+1)]
v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1)* grads["db" + str(l+1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1 ** t)
v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1 ** t)
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * (grads["dW" + str(l+1)]**2)
s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * (grads["db" + str(l+1)]**2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2 ** t)
s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2 ** t)
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * (v_corrected["dW" + str(l+1)] / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon))
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * (v_corrected["db" + str(l+1)] / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon))
### END CODE HERE ###
return parameters, v, s
# In[19]:
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = \n" + str(parameters["W1"]))
print("b1 = \n" + str(parameters["b1"]))
print("W2 = \n" + str(parameters["W2"]))
print("b2 = \n" + str(parameters["b2"]))
print("v[\"dW1\"] = \n" + str(v["dW1"]))
print("v[\"db1\"] = \n" + str(v["db1"]))
print("v[\"dW2\"] = \n" + str(v["dW2"]))
print("v[\"db2\"] = \n" + str(v["db2"]))
print("s[\"dW1\"] = \n" + str(s["dW1"]))
print("s[\"db1\"] = \n" + str(s["db1"]))
print("s[\"dW2\"] = \n" + str(s["dW2"]))
print("s[\"db2\"] = \n" + str(s["db2"]))
# **Expected Output**:
#
# ```
# W1 =
# [[ 1.63178673 -0.61919778 -0.53561312]
# [-1.08040999 0.85796626 -2.29409733]]
# b1 =
# [[ 1.75225313]
# [-0.75376553]]
# W2 =
# [[ 0.32648046 -0.25681174 1.46954931]
# [-2.05269934 -0.31497584 -0.37661299]
# [ 1.14121081 -1.09245036 -0.16498684]]
# b2 =
# [[-0.88529978]
# [ 0.03477238]
# [ 0.57537385]]
# v["dW1"] =
# [[-0.11006192 0.11447237 0.09015907]
# [ 0.05024943 0.09008559 -0.06837279]]
# v["db1"] =
# [[-0.01228902]
# [-0.09357694]]
# v["dW2"] =
# [[-0.02678881 0.05303555 -0.06916608]
# [-0.03967535 -0.06871727 -0.08452056]
# [-0.06712461 -0.00126646 -0.11173103]]
# v["db2"] =
# [[ 0.02344157]
# [ 0.16598022]
# [ 0.07420442]]
# s["dW1"] =
# [[ 0.00121136 0.00131039 0.00081287]
# [ 0.0002525 0.00081154 0.00046748]]
# s["db1"] =
# [[ 1.51020075e-05]
# [ 8.75664434e-04]]
# s["dW2"] =
# [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
# [ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
# [ 4.50571368e-04 1.60392066e-07 1.24838242e-03]]
# s["db2"] =
# [[ 5.49507194e-05]
# [ 2.75494327e-03]
# [ 5.50629536e-04]]
# ```
# You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
# ## 5 - Model with different optimization algorithms
#
# Let's use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
# In[20]:
train_X, train_Y = load_dataset()
# We have already implemented a 3-layer neural network. You will train it with:
# - Mini-batch **Gradient Descent**: it will call your function:
# - `update_parameters_with_gd()`
# - Mini-batch **Momentum**: it will call your functions:
# - `initialize_velocity()` and `update_parameters_with_momentum()`
# - Mini-batch **Adam**: it will call your functions:
# - `initialize_adam()` and `update_parameters_with_adam()`
# In[22]:
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
m = X.shape[1] # number of training examples
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
        # Define the random minibatches. We increment the seed to reshuffle the dataset differently after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
cost_total = 0
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost and add to the cost total
cost_total += compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
cost_avg = cost_total / m
        # Print the cost every 1000 epochs
if print_cost and i % 1000 == 0:
print ("Cost after epoch %i: %f" %(i, cost_avg))
if print_cost and i % 100 == 0:
costs.append(cost_avg)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# You will now run this 3 layer neural network with each of the 3 optimization methods.
#
# ### 5.1 - Mini-batch Gradient descent
#
# Run the following code to see how the model does with mini-batch gradient descent.
# In[23]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.2 - Mini-batch gradient descent with momentum
#
# Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
# In[24]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.3 - Mini-batch with Adam mode
#
# Run the following code to see how the model does with Adam.
# In[25]:
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# ### 5.4 - Summary
#
# <table>
# <tr>
# <td>
# **optimization method**
# </td>
# <td>
# **accuracy**
# </td>
# <td>
# **cost shape**
# </td>
#
# </tr>
# <tr>
# <td>
# Gradient descent
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# </tr>
# <tr>
# <td>
# Momentum
# </td>
# <td>
# 79.7%
# </td>
# <td>
# oscillations
# </td>
# </tr>
# <tr>
# <td>
# Adam
# </td>
# <td>
# 94%
# </td>
# <td>
# smoother
# </td>
# </tr>
# </table>
#
# Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
#
# Adam, on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
#
# Some advantages of Adam include:
# - Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
# - Usually works well even with little tuning of hyperparameters (except $\alpha$)
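#
# If you want to experiment further, you could re-run the Adam model with a different learning rate (the value below is chosen arbitrarily for illustration) and compare the cost curves, reusing the `model` and `predict` functions defined above:
#
# ``` python
# parameters = model(train_X, train_Y, layers_dims, optimizer = "adam", learning_rate = 0.0003)
# predictions = predict(train_X, train_Y, parameters)
# ```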
# **References**:
#
# - Adam paper: https://arxiv.org/pdf/1412.6980.pdf
| unlicense |
APMonitor/applications | scheduling_and_control/3products_beginning_application/apm.py | 4 | 26852 | # Import
import csv
import math
import os
import random
import string
import time
import webbrowser
from contextlib import closing
import sys
# Get Python version
ver = sys.version_info[0]
#print('Version: '+str(ver))
if ver==2: # Python 2
import urllib
else: # Python 3+
import urllib.request, urllib.parse, urllib.error
#import socket
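# Typical usage of this module (an illustrative sketch only; the server URL,
# application name and file names below are placeholders, not values shipped
# with this file):
#
#   server = 'http://<your-apm-server>'
#   app = 'my_app'
#   load_model(server, app, 'model.apm')   # send the APM model file
#   load_data(server, app, 'data.csv')     # send the CSV data file
#   cmd(server, app, 'solve')              # request a solution
#   y = get_solution(server, app)          # retrieve results.csv as a dictionary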
if ver==2: # Python 2
def cmd(server, app, aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = string.strip(server) + '/online/apm_line.php'
app = app.lower()
app.replace(" ", "")
params = urllib.urlencode({'p': app, 'a': aline})
f = urllib.urlopen(url_base, params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
char = f.read(1)
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
response = f.read()
except:
response = 'Failed to connect to server'
return response
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = string.strip(server) + '/ip.php'
f = urllib.urlopen(url_base)
ip = string.strip(f.read())
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + string.strip(mode) + '.t0'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
fh.write(solution.replace('\r',''))
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.urlopen(url)) as f:
reader = csv.reader(f, delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + filename
f = urllib.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
fh.write(file.replace('\r',''))
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
app.replace(" ","")
response = cmd(server,app,aline)
return response
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = string.strip(server) + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
app.replace(" ","")
response = cmd(server,app,x)
return response
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
f = open(filename, 'rb')
reader = csv.reader(f)
headers = reader.next()
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(string.strip(name))
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = string.strip(server) + '/online/get_tag.php'
app = app.lower()
app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name})
f = urllib.urlopen(url_base,params)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = string.strip(server) + '/online/meas.php'
app = app.lower()
app.replace(" ","")
params = urllib.urlencode({'p':app,'n':name+'.MEAS','v':value})
f = urllib.urlopen(url_base,params)
# Send request to web-server
response = f.read()
return response
else: # Python 3+
def cmd(server,app,aline):
'''Send a request to the server \n \
server = address of server \n \
app = application name \n \
aline = line to send to server \n'''
try:
# Web-server URL address
url_base = server.strip() + '/online/apm_line.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'a':aline})
en_params = params.encode()
f = urllib.request.urlopen(url_base,en_params)
# Stream solution output
if(aline=='solve'):
line = ''
while True:
en_char = f.read(1)
char = en_char.decode()
if not char:
break
elif char == '\n':
print(line)
line = ''
else:
line += char
# Send request to web-server
en_response = f.read()
response = en_response.decode()
except:
response = 'Failed to connect to server'
return response
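    # Illustrative sketch (not part of the original library; the server URL and
    # application name are placeholders): cmd() is the low-level transport used
    # by the helpers below, e.g.
    #   response = cmd('http://byu.apmonitor.com', 'myapp', 'clear all')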
def load_model(server,app,filename):
'''Load APM model file \n \
server = address of server \n \
app = application name \n \
filename = APM file name'''
# Load APM File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,' '+aline)
return
def load_data(server,app,filename):
'''Load CSV data file \n \
server = address of server \n \
app = application name \n \
filename = CSV file name'''
# Load CSV File
f = open(filename,'r')
aline = f.read()
f.close()
app = app.lower()
app.replace(" ","")
response = cmd(server,app,'csv '+aline)
return
def get_ip(server):
'''Get current IP address \n \
server = address of server'''
# get ip address for web-address lookup
url_base = server.strip() + '/ip.php'
f = urllib.request.urlopen(url_base)
fip = f.read()
ip = fip.decode().strip()
return ip
def apm_t0(server,app,mode):
'''Retrieve restart file \n \
server = address of server \n \
app = application name \n \
mode = {'ss','mpu','rto','sim','est','ctl'} '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + mode.strip() + '.t0'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
return solution
def get_solution(server,app):
'''Retrieve solution results\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/results.csv'
f = urllib.request.urlopen(url)
# Send request to web-server
solution = f.read()
# Write the file
sol_file = 'solution_' + app + '.csv'
fh = open(sol_file,'w')
# possible problem here if file isn't able to open (see MATLAB equivalent)
en_solution = solution.decode().replace('\r','')
fh.write(en_solution)
fh.close()
# Use array package
from array import array
# Import CSV file from web server
with closing(urllib.request.urlopen(url)) as f:
fr = f.read()
de_f = fr.decode()
reader = csv.reader(de_f.splitlines(), delimiter=',')
y={}
for row in reader:
if len(row)==2:
y[row[0]] = float(row[1])
else:
y[row[0]] = array('f', [float(col) for col in row[1:]])
# Return solution
return y
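    # Illustrative sketch (assumption, not in the original file): the returned
    # dictionary maps variable names to scalars or arrays, e.g.
    #   y = get_solution(server, app)
    #   t = y['time']   # time vector for dynamic modes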
def get_file(server,app,filename):
'''Retrieve any file from web-server\n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + filename
f = urllib.request.urlopen(url)
# Send request to web-server
file = f.read()
# Write the file
fh = open(filename,'w')
en_file = file.decode().replace('\r','')
fh.write(en_file)
fh.close()
return (file)
def set_option(server,app,name,value):
'''Load APM option \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.option \n \
value = numeric value of option '''
aline = 'option %s = %f' %(name,value)
app = app.lower()
app.replace(" ","")
response = cmd(server,app,aline)
return response
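    # Illustrative sketch (assumption): option names are dotted strings, such as
    # the solver mode that solve() sets below, e.g.
    #   set_option(server, app, 'nlc.imode', 4)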
def web(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_oper.htm'
webbrowser.get().open_new_tab(url)
return url
def web_var(server,app):
'''Open APM web viewer in local browser \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/' + ip + '_' + app + '_var.htm'
webbrowser.get().open_new_tab(url)
return url
def web_root(server,app):
'''Open APM root folder \n \
server = address of server \n \
app = application name '''
# Retrieve IP address
ip = get_ip(server)
# Web-server URL address
app = app.lower()
app.replace(" ","")
url = server.strip() + '/online/' + ip + '_' + app + '/'
webbrowser.get().open_new_tab(url)
return url
def classify(server,app,type,aline):
'''Classify parameter or variable as FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
type = {FV,MV,SV,CV} \n \
aline = parameter or variable name '''
x = 'info' + ' ' + type + ', ' + aline
app = app.lower()
app.replace(" ","")
response = cmd(server,app,x)
return response
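    # Illustrative sketch (assumption; 'u' is a placeholder variable defined in
    # the model file): declare a manipulated variable with
    #   classify(server, app, 'MV', 'u')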
def csv_data(filename):
'''Load CSV File into Python
A = csv_data(filename)
Function csv_data extracts data from a comma
separated value (csv) file and returns it
to the array A'''
try:
            f = open(filename, 'r')
reader = csv.reader(f)
headers = next(reader)
c = [float] * (len(headers))
A = {}
for h in headers:
A[h] = []
for row in reader:
for h, v, conv in zip(headers, row, c):
A[h].append(conv(v))
except ValueError:
A = {}
return A
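    # Illustrative sketch (assumption; 'myapp.csv' is a placeholder file name):
    #   A = csv_data('myapp.csv')
    #   if A: print(list(A))   # column headers loaded from the file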
def csv_lookup(name,replay):
'''Lookup Index of CSV Column \n \
name = parameter or variable name \n \
replay = csv replay data to search'''
header = replay[0]
try:
i = header.index(name.strip())
except ValueError:
i = -1 # no match
return i
def csv_element(name,row,replay):
'''Retrieve CSV Element \n \
name = parameter or variable name \n \
row = row of csv file \n \
replay = csv replay data to search'''
# get row number
if (row>len(replay)): row = len(replay)-1
# get column number
col = csv_lookup(name,replay)
if (col>=0): value = float(replay[row][col])
else: value = float('nan')
return value
def get_attribute(server,app,name):
'''Retrieve options for FV, MV, SV, or CV \n \
server = address of server \n \
app = application name \n \
name = {FV,MV,SV,CV}.{MEAS,MODEL,NEWVAL} \n \n \
Valid name combinations \n \
{FV,MV,CV}.MEAS \n \
{SV,CV}.MODEL \n \
{FV,MV}.NEWVAL '''
# Web-server URL address
url_base = server.strip() + '/online/get_tag.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
value = eval(f.read())
return value
def load_meas(server,app,name,value):
'''Transfer measurement to server for FV, MV, or CV \n \
server = address of server \n \
app = application name \n \
name = name of {FV,MV,CV} '''
# Web-server URL address
url_base = server.strip() + '/online/meas.php'
app = app.lower()
app.replace(" ","")
params = urllib.parse.urlencode({'p':app,'n':name+'.MEAS','v':value})
params_en = params.encode()
f = urllib.request.urlopen(url_base,params_en)
# Send request to web-server
response = f.read()
return response
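    # Illustrative sketch (assumption; 'TC1' is a placeholder name): write a
    # measurement and read it back through the web interface,
    #   load_meas(server, app, 'TC1', 25.0)
    #   meas = get_attribute(server, app, 'TC1.MEAS')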
def solve(app,imode):
'''
APM Solver for simulation, estimation, and optimization with both
static (steady-state) and dynamic models. The dynamic modes can solve
index 2+ DAEs without numerical differentiation.
y = solve(app,imode)
Function solve uploads the model file (apm) and optionally
a data file (csv) with the same name to the web-server and performs
a forward-time stepping integration of ODE or DAE equations
with the following arguments:
Input: app = model (apm) and data file (csv) name
imode = simulation mode {1..7}
                    steady-state   dynamic   sequential
         simulate        1            4          7
         estimate        2            5          8 (under dev)
         optimize        3            6          9 (under dev)
Output: y.names = names of all variables
y.values = tables of values corresponding to y.names
y.nvar = number of variables
y.x = combined variables and values but variable
names may be modified to make them valid
characters (e.g. replace '[' with '')
'''
# server and application file names
server = 'http://byu.apmonitor.com'
app = app.lower()
app.replace(" ","")
app_model = app + '.apm'
app_data = app + '.csv'
# randomize the application name
from random import randint
app = app + '_' + str(randint(1000,9999))
# clear previous application
cmd(server,app,'clear all')
try:
# load model file
load_model(server,app,app_model)
except:
        msg = 'Model file ' + app_model + ' does not exist'
print(msg)
return []
# check if data file exists (optional)
try:
# load data file
load_data(server,app,app_data)
except:
# data file is optional
        print('Optional data file ' + app_data + ' does not exist')
pass
# default options
    # use or don't use web viewer (flag renamed so it does not shadow web())
    use_web_viewer = False
    if use_web_viewer:
        set_option(server,app,'nlc.web',2)
    else:
        set_option(server,app,'nlc.web',0)
# internal nodes in the collocation (between 2 and 6)
set_option(server,app,'nlc.nodes',3)
# sensitivity analysis (default: 0 - off)
set_option(server,app,'nlc.sensitivity',0)
# simulation mode (1=ss, 2=mpu, 3=rto)
# (4=sim, 5=est, 6=nlc, 7=sqs)
set_option(server,app,'nlc.imode',imode)
# attempt solution
solver_output = cmd(server,app,'solve')
# check for successful solution
status = get_attribute(server,app,'nlc.appstatus')
if status==1:
        # open web viewer if selected
        if use_web_viewer:
            web(server,app)
# retrieve solution and solution.csv
z = get_solution(server,app)
return z
else:
print(solver_output)
print('Error: Did not converge to a solution')
return []
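# Illustrative usage sketch (assumption; 'myapp' is a placeholder and requires
# myapp.apm, and optionally myapp.csv, in the working directory):
#   y = solve('myapp', 7)    # sequential dynamic simulation
#   if y:
#       print(list(y))       # names of the solved variables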
def plotter(y, subplots=1, save=False, filename='solution', format='png'):
'''
The plotter will go through each of the variables in the output y and
create plots for them. The number of vertical subplots can be
specified and the plots can be saved in the same folder.
    This functionality is dependent on matplotlib, so this library must
be installed on the computer for the automatic plotter to work.
The input y should be the output from the apm solution. This can be
retrieved from the server using the following line of code:
y = get_solution(server, app)
'''
try:
import matplotlib.pyplot as plt
var_size = len(y)
colors = ['r-', 'g-', 'k-', 'b-']
color_pick = 0
if subplots > 9:
subplots = 9
j = 1
pltcount = 0
start = True
for i in range(var_size):
if list(y)[i] != 'time' and list(y)[i][:3] != 'slk':
if j == 1:
if start != True:
plt.xlabel('time')
start = False
if save:
if pltcount != 0:
plt.savefig(filename + str(pltcount) + '.' + format, format=format)
pltcount += 1
plt.figure()
else:
plt.gca().axes.get_xaxis().set_ticklabels([])
plt.subplot(100*subplots+10+j)
plt.plot(y['time'], y[list(y)[i]], colors[color_pick], linewidth=2.0)
if color_pick == 3:
color_pick = 0
else:
color_pick += 1
plt.ylabel(list(y)[i])
if subplots == 1:
plt.title(list(y)[i])
if j == subplots or i+2 == var_size:
j = 1
else:
j += 1
plt.xlabel('time')
if save:
plt.savefig('plots/' + filename + str(pltcount) + '.' + format, format=format)
if pltcount <= 20:
plt.show()
except ImportError:
print('Dependent Packages not imported.')
print('Please install matplotlib package to use plotting features.')
except:
print('Graphs not created. Double check that the')
        print('simulation/optimization was successful')
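# Illustrative usage sketch (assumption): plot a solution returned by solve()
# with three stacked subplots and save the figures as PNG files:
#   y = solve('myapp', 7)
#   if y:
#       plotter(y, subplots=3, save=True, filename='myapp_', format='png')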
# This code adds back compatibility with previous versions
apm = cmd
apm_load = load_model
csv_load = load_data
apm_ip = get_ip
apm_sol = get_solution
apm_get = get_file
apm_option = set_option
apm_web = web
apm_web_var = web_var
apm_web_root = web_root
apm_info = classify
apm_tag = get_attribute
apm_meas = load_meas
apm_solve = solve
| apache-2.0 |
mixturemodel-flow/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 30 | 7114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import logging_ops as logging
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
from tensorflow.python.training import session_run_hook
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes clusters or waits for cluster initialization."""
def __init__(self, init_op, is_initialized_op, is_chief):
self._init_op = init_op
self._is_chief = is_chief
self._is_initialized_op = is_initialized_op
def after_create_session(self, session, _):
assert self._init_op.graph == ops.get_default_graph()
assert self._is_initialized_op.graph == self._init_op.graph
while True:
try:
if session.run(self._is_initialized_op):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode, config):
"""Model function."""
assert labels is None, labels
(all_scores,
model_predictions,
losses, training_op,
init_op,
is_initialized) = gmm_ops.gmm(self._parse_tensor_or_dict(features),
self._training_initial_clusters,
self._num_clusters, self._random_seed,
self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
training_hooks = [_InitializeClustersHook(
init_op, is_initialized, config.is_chief)]
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op,
training_hooks=training_hooks)
return _model_fn
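# Illustrative usage sketch (assumption, not part of this module): train the
# estimator on random points supplied through an input_fn and inspect the
# learned parameters.
#   import numpy as np
#   import tensorflow as tf
#   def input_fn():
#     points = np.random.rand(1000, 2).astype(np.float32)
#     return tf.constant(points), None
#   gmm = GMM(num_clusters=3, covariance_type='full')
#   gmm.fit(input_fn=input_fn, steps=100)
#   print(gmm.clusters())   # cluster centers
#   print(gmm.weights())    # mixture weights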
| apache-2.0 |
chatcannon/numpy | numpy/core/code_generators/ufunc_docstrings.py | 2 | 93777 | """
Docstrings for generated ufuncs
The syntax is designed to look like the function add_newdoc is being
called from numpy.lib, but in this file add_newdoc puts the docstrings
in a dictionary. This dictionary is used in
numpy/core/code_generators/generate_umath.py to generate the docstrings
for the ufuncs in numpy.core at the C level when the ufuncs are created
at compile time.
"""
from __future__ import division, absolute_import, print_function
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(start=-10, stop=10, num=101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
add : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
        Array of the same shape as `x`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
        Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, `arcsinh` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return
the `z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function
that has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
    therefore ``00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
    The number 13 has the binary representation ``00001101``. Likewise,
    16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
    then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
    >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
    ...               np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
        The complex conjugate of `x`, with same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine element-wise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='d')
    >>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
    >>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'heaviside',
"""
Compute the Heaviside step function.
The Heaviside step function is defined as::
        heaviside(x, h0) = 0   if x < 0
                           h0  if x == 0
                           1   if x > 0
where `h0` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
x : array_like
Input values.
h0 : array_like
The value of the function at x = 0.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : ndarray
The output array, element-wise Heaviside step function of `x`.
Notes
-----
.. versionadded:: 1.13.0
References
----------
.. Wikipedia, "Heaviside step function",
https://en.wikipedia.org/wiki/Heaviside_step_function
Examples
--------
>>> np.heaviside([-1.5, 0, 2.0], 0.5)
array([ 0. , 0.5, 1. ])
>>> np.heaviside([-1.5, 0, 2.0], 1)
array([ 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The quotient ``x1/x2``, element-wise. Returns a scalar if
both ``x1`` and ``x2`` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and
division by zero.
Notes
-----
Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
Behavior on division by zero can be changed using ``seterr``.
In Python 2, when both ``x1`` and ``x2`` are of an integer type,
``divide`` will behave like ``floor_divide``. In Python 3, it behaves
like ``true_divide``.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic (again,
Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
    then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with
magnitude 1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
    .. [2] M. Abramowitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='gray')
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi], cmap='hsv')
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
power
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray or scalar
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where `floor(-2.5) == -3`.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
    It is equivalent to the Python ``//`` operator and pairs with the
    Python ``%`` (`remainder`) function so that ``a = a % b + b * (a // b)``
    up to roundoff.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
remainder : Remainder complementary to floor_divide.
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function fmod, the
remainder has the same sign as the dividend `x1`. It is equivalent to
the Matlab(TM) ``rem`` function and should not be confused with the
Python modulus operator ``x1 % x2``.
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Equivalent to the Python ``%`` operator.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors
is bound by conventions. For `fmod`, the sign of result is the sign of
the dividend, while for `remainder` the sign of the result is the sign
of the divisor. The `fmod` function is equivalent to the Matlab(TM)
``rem`` function.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned. In a
two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. A N-bit
two's-complement system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer and boolean types are handled.
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
    >>> np.invert(np.array([13], dtype=np.uint8))
    array([242], dtype=uint8)
    >>> np.binary_repr(13, width=8)
    '00001101'
    >>> np.binary_repr(242, width=8)
    '11110010'
    The result depends on the bit-width:
    >>> np.invert(np.array([13], dtype=np.uint16))
    array([65522], dtype=uint16)
    >>> np.binary_repr(13, width=16)
    '0000000000001101'
    >>> np.binary_repr(65522, width=16)
    '1111111111110010'
    When using signed integer types the result is the two's complement of
    the result for the unsigned type:
    >>> np.invert(np.array([13], dtype=np.int8))
    array([-14], dtype=int8)
    >>> np.binary_repr(-14, width=8)
    '11110010'
    Booleans are accepted as well:
    >>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finiteness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the
corresponding element of the input is finite; otherwise the values
are False (element is either positive infinity, negative infinity
or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
    Also, positive infinity is not equivalent to negative infinity; but
    infinity is equivalent to positive infinity. Errors result if the
second argument is also supplied when `x` is a scalar input, or if
first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Returns a boolean array of the same shape as `x`, True where ``x ==
+/-inf``, otherwise False.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or boolean ndarray
For scalar input, the result is a new boolean with value True if
the input is positive or negative infinity; otherwise the value is
False.
For array input, the result is a boolean array with the same shape
as the input and the values are True where the corresponding
element of the input is positive or negative infinity; elsewhere
the values are False. If a second argument was supplied the result
is stored there. If the type of that array is a numeric type the
result is represented as zeros and ones, if the type is boolean
then as False and True, respectively. The return value `y` is then
a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray or bool
For scalar input, the result is a new boolean with value True if
the input is NaN; otherwise the value is False.
For array input, the result is a boolean array of the same
dimensions as the input and the values are True if the
corresponding element of the input is NaN; otherwise the values are
False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it.
`log10` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base 2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small as
to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it.
`log1p` handles the floating-point negative zero as an infinitesimal
negative number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : ndarray or bool
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
Element-wise minimum of two arrays, propagates NaNs.
fmax :
Element-wise maximum of two arrays, ignores NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
fmin, amin, nanmin
Notes
-----
The maximum is equivalent to ``np.where(x1 >= x2, x1, x2)`` when
neither x1 nor x2 are nans, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then that
element is returned. If both elements are NaNs then the first is
returned. The latter distinction is important for complex NaNs, which
are defined as at least one of the real or imaginary parts being a NaN.
The net effect is that NaNs are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
Element-wise maximum of two arrays, propagates NaNs.
fmin :
Element-wise minimum of two arrays, ignores NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
fmax, amax, nanmax
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when
neither x1 nor x2 are NaNs, but it is faster and does proper
broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.minimum(-np.Inf, 1)
-inf
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
Element-wise minimum of two arrays, ignores NaNs.
maximum :
Element-wise maximum of two arrays, propagates NaNs.
amax :
The maximum value of an array along a given axis, propagates NaNs.
nanmax :
The maximum value of an array along a given axis, ignores NaNs.
minimum, amin, nanmin
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a NaN, then the
non-nan element is returned. If both elements are NaNs then the first
is returned. The latter distinction is important for complex NaNs,
which are defined as at least one of the real or imaginary parts being
a NaN. The net effect is that NaNs are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : ndarray or scalar
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
Element-wise maximum of two arrays, ignores NaNs.
minimum :
Element-wise minimum of two arrays, propagates NaNs.
amin :
The minimum value of an array along a given axis, propagates NaNs.
nanmin :
The minimum value of an array along a given axis, ignores NaNs.
maximum, amax, nanmax
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are NaNs, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Numerical negative, element-wise.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', '_ones_like',
"""
This function used to be the numpy.ones_like, but now a specific
function for that has been written for consistency with the other
*_like functions. It is only used internally in a limited fashion now.
See Also
--------
ones_like
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape. Note that an
integer type raised to a negative integer power will raise a ValueError.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
float_power : power function that promotes integers to float
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'float_power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in `x2`.
`x1` and `x2` must be broadcastable to the same shape. This differs from
the power function in that integers, float16, and float32 are promoted to
floats with a minimum precision of float64 so that the result is always
inexact. The intent is that the function will return a usable result for
negative powers and seldom overflow for positive powers.
.. versionadded:: 1.12.0
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
See Also
--------
power : power function that preserves type
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.float_power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.float_power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.float_power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
function.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The element-wise remainder of the quotient ``floor_divide(x1, x2)``.
Returns a scalar if both `x1` and `x2` are scalars.
See Also
--------
floor_divide : Equivalent of Python ``//`` operator.
fmod : Equivalent of the Matlab(TM) ``rem`` function.
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by `x2` places. Because the internal
representation of numbers is in binary format, this operation is
equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. nan
is returned for nan inputs.
For complex inputs, the `sign` function returns
``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``.
complex(nan, 0) is returned for complex nan inputs.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Notes
-----
There is more than one definition of sign in common use for complex
numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}`
which is different from a common alternative, :math:`x/|x|`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
>>> np.sign(5-2j)
(1+0j)
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next floating-point value after x1 towards x2, element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and NaN is NaN.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry (the
mathematical study of triangles). Consider a circle of radius 1
centered on the origin. A ray comes in from the :math:`+x` axis, makes
an angle at the origin (measured counter-clockwise from that axis), and
departs from the origin. The :math:`y` coordinate of the outgoing
ray's intersection with the unit circle is the sine of that angle. It
ranges from -1 for :math:`x=3\\pi / 2` to +1 for :math:`\\pi / 2.` The
function has zeroes where the angle is a multiple of :math:`\\pi`.
Sines of angles between :math:`\\pi` and :math:`2\\pi` are negative.
The numerous properties of the sine and related functions are included
in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'cbrt',
"""
Return the cube-root of an array, element-wise.
.. versionadded:: 1.10.0
Parameters
----------
x : array_like
The values whose cube-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the cube-root of each
element in `x`.
If `out` was provided, `y` is a reference to it.
Examples
--------
>>> np.cbrt([1,8,27])
array([ 1., 2., 3.])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2; without
``from __future__ import division``, ``//`` and ``/`` give the same
result for integer operands. The default floor-division behaviour of
``/`` can be replaced by true division with ``from
__future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
add_newdoc('numpy.core.umath', 'frexp',
"""
Decompose the elements of x into mantissa and twos exponent.
Returns (`mantissa`, `exponent`), where ``x = mantissa * 2**exponent``.
The mantissa lies in the open interval (-1, 1), while the twos
exponent is a signed integer.
Parameters
----------
x : array_like
Array of numbers to be decomposed.
out1 : ndarray, optional
Output array for the mantissa. Must have the same shape as `x`.
out2 : ndarray, optional
Output array for the exponent. Must have the same shape as `x`.
Returns
-------
(mantissa, exponent) : tuple of ndarrays, (float, int)
`mantissa` is a float array with values between -1 and 1.
`exponent` is an int array which represents the exponent of 2.
See Also
--------
ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
>>> x = np.arange(9)
>>> y1, y2 = np.frexp(x)
>>> y1
array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
0.5 ])
>>> y2
array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
add_newdoc('numpy.core.umath', 'ldexp',
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : array_like
Array of multipliers.
x2 : array_like, int
Array of twos exponents.
out : ndarray, optional
Output array for the result.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
See Also
--------
frexp : Return (y1, y2) from ``x = y1 * 2**y2``, inverse to `ldexp`.
Notes
-----
Complex dtypes are not supported, they will raise a TypeError.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.], dtype=float32)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
array([ 0., 1., 2., 3., 4., 5.])
""")
| bsd-3-clause |
bsipocz/seaborn | setup.py | 22 | 3623 | #! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <[email protected]>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.
Some of the features that seaborn offers are
- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.7.0.dev'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
install_requires = []
# Just make sure dependencies exist, I haven't rigorously
# tested what the minimal versions that will work are
# (help on that would be awesome)
try:
import numpy
except ImportError:
install_requires.append('numpy')
try:
import scipy
except ImportError:
install_requires.append('scipy')
try:
import matplotlib
except ImportError:
install_requires.append('matplotlib')
try:
import pandas
except ImportError:
install_requires.append('pandas')
return install_requires
if __name__ == "__main__":
install_requires = check_dependencies()
setup(name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
install_requires=install_requires,
packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Multimedia :: Graphics',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS'],
)
| bsd-3-clause |
droundy/deft | papers/fuzzy-fmt/plot-phasediagram.py | 1 | 13966 | #!/usr/bin/python3
#This program produces temperature vs density, and pressure vs temperature phase diagrams
#from data stored in *best.dat (or *best_tensor.dat) data files generated by figs/new-melting.cpp
#and found in deft/papers/fuzzy-fmt/data/phase-diagram (edit later - currently files in newdata/phase-diagram and newdata_tensor/phasediagram)
#NOTE: Run this plot script from directory deft/papers/fuzzy-fmt
#with command ./plot-phasediagram.py [Optional: --tensor]
from __future__ import print_function, division
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os, glob
import argparse
import sys
parser = argparse.ArgumentParser("Plots phase diagrams (p-vs-T and T-vs-n) as well as p-vs-n, p-vs-V, and fixed-P/fixed-n cuts.")
parser.add_argument('--tensor', action='store_true',
help='use tensor weight')
args=parser.parse_args()
p_at_freezing = [] #pressure at freezing (intersection point between homogeneous and crystal plots)
n_homogeneous_at_freezing =[]
n_crystal_at_freezing = []
kT_homogeneous_at_freezing = []
kT_crystal_at_freezing = []
kT_in_plot = []
kT_data = []
density_data = [] #index corresponds to kT
pressure_data = [] #index corresponds to kT
#for kT in np.arange(0.1, 1.15, 0.05): #data files with these temperatures will be plotted
#for kT in np.arange(0.1, 2.05, 0.05): #original
#for kT in np.arange(0.4, 2.05, 0.05): # new normal
#for kT in (1, 2, 4, 6, 8, 10, 12, 14, 16, 18):
#for kT in (0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2): #for paper
for kT in (1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25): #use for kT from 1 to 38 and 0.5
#for kT in (2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38): #use for kT from 1 to 38 and 0.5
#for kT in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 38): #use for kT from 1 to 38 and 0.5
#for kT in np.arange(0.1, 1.05, 0.05): #data files with these temperatures will be plotted DEBUG
#values above and below this range do not currrently work DEBUG
n = []
invn = []
hfe = []
cfe = []
if args.tensor :
#files = sorted(list(glob.glob('data/phase-diagram/kT%.3f_n*_best_tensor.dat' % kT)))
files = sorted(list(glob.glob('newdata_tensor/phase-diagram/kT%.3f_n*_best_tensor.dat' % kT))) #remove 2 at the end of phase-diagram when done comparing new data
else :
files = sorted(list(glob.glob('newdata/phase-diagram/kT%.3f_n*_best.dat' % kT))) #remove 2 at the end of phase-diagram when done comparing new data
#files = sorted(list(glob.glob('data/phase-diagram/kT%.3f_n*_best.dat' % kT)))
#files = sorted(list(glob.glob('crystallization/kT%.3f_n*_best.dat' % kT)))
if len(files) == 0:
continue
for f in files:
data = np.loadtxt(f)
n.append(data[1]) #density
invn.append(1/data[1])
hfe.append(data[4]) #homogeneous free energy/atom
cfe.append(data[5]) #crystal free energy/atom
hfe = np.array(hfe)
cfe = np.array(cfe)
invn = np.array(invn)
n = np.array(n)
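# The columns of `functions` below are invn**-k = n**k for k = 0..6, so the
# least-squares fit models the crystal free energy per atom as a degree-6
# polynomial in density; `pressure_functions` holds the matching derivatives
# k*invn**-(k+1), so the same coefficients give fit_p = -d(fit_cfe)/d(1/n).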
functions = np.vstack((np.ones_like(invn),
invn**-1,
invn**-2,
invn**-3,
invn**-4,
invn**-5,
invn**-6)).T
pressure_functions = np.vstack((np.zeros_like(invn),
invn**-2,
2*invn**-3,
3*invn**-4,
4*invn**-5,
5*invn**-6,
6*invn**-7)).T
A = np.linalg.lstsq(functions, cfe)
coeff = A[0]
#print('residuals', A[1])
#print('coeff', coeff)
fit_cfe = np.dot(functions, coeff)
dhfe=np.diff(hfe) #Caution: depends on order of data files!
dcfe=np.diff(cfe) #Caution: depends on order of data files!
dinvn=np.diff(invn) #Caution: depends on order of data files!
mid_invn=invn[0:len(invn)-1]+dinvn/2
hpressure = -(dhfe/dinvn) #for fixed N and Te
cpressure = -(dcfe/dinvn) #for fixed N and Te
fit_p = np.dot(pressure_functions, coeff)
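# Gibbs free energy per atom is g = f + p/n (the volume per atom is 1/n); the
# mid_* arrays are evaluated at segment midpoints so they line up with the
# finite-difference pressures computed above.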
mid_hfe = 0.5*(hfe[1:] + hfe[:-1])
mid_cfe = 0.5*(cfe[1:] + cfe[:-1])
mid_h_gibbs = mid_hfe + mid_invn*hpressure
mid_c_gibbs = mid_cfe + mid_invn*cpressure
fit_c_gibbs = fit_cfe + invn*fit_p
#Find pressure at point of intersection
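# find_first_intersection walks every pair of segments of the two (pressure,
# Gibbs) curves, intersects their linear extensions, and returns the first
# crossing that lies inside both segments, i.e. the coexistence pressure at
# which the homogeneous and crystal Gibbs free energies per atom are equal.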
def find_first_intersection(p1, g1, p2, g2):
for i in range(1,len(g1)-1):
m1=(g1[i+1]-g1[i])/(p1[i+1]-p1[i])
for j in range(1,len(g2)-1):
m2=(g2[j+1]-g2[j])/(p2[j+1]-p2[j])
#print(m2) #debug ASK!
if m1!=m2 :
P_inter=(g2[j] - m2*p2[j] -g1[i] + m1*p1[i])/(m1-m2)
if p1[i] < P_inter < p1[i+1] and p2[j] < P_inter < p2[j+1]:
g_inter=m1*P_inter+g1[i]-m1*p1[i]
if g1[i] < g_inter < g1[i+1] and g2[j] < g_inter < g2[j+1] :
return P_inter, g_inter
p_inter, g_inter = find_first_intersection(hpressure, mid_h_gibbs, cpressure, mid_c_gibbs)
pf_inter, gf_inter = find_first_intersection(hpressure, mid_h_gibbs, fit_p, fit_c_gibbs)
#Find homogeneous and crystal densities at p_inter
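# find_densities linearly interpolates along a (pressure, 1/n) curve to get the
# volume per atom at which the pressure first reaches p_inter (it assumes the
# pressure array increases with index).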
def find_densities(p_inter, pressure, invn):
for i in range(1,len(pressure)-1):
if pressure[i] > p_inter :
pressureabove=pressure[i]
invnabove=invn[i]
pressurebelow=pressure[i-1]
invnbelow=invn[i-1]
m=(pressureabove-pressurebelow)/(invnabove-invnbelow)
invn_inter=invnabove-((pressureabove-p_inter)/m)
return invn_inter
invnh=find_densities(p_inter, hpressure, mid_invn)
invnc=find_densities(p_inter, cpressure, mid_invn)
p_at_freezing.append(p_inter)
n_homogeneous_at_freezing.append(1/invnh)
n_crystal_at_freezing.append(1/invnc)
# compute the actual physical pressure as a function of density, and skip over coexistence
actual_pressure = []
actual_density = []
for i in range(len(mid_invn)):
if hpressure[i] >= p_inter:
break # if the pressure is too high, then we should just stop, since we have left the fluid
actual_pressure.append(hpressure[i])
actual_density.append(1/mid_invn[i])
actual_pressure.append(p_inter)
actual_density.append(1/invnh)
actual_pressure.append(p_inter)
actual_density.append(1/invnc)
for i in range(len(mid_invn)):
if cpressure[i] < 0 and mid_invn[i] <= invnc:
break # when the pressure is negative, we know we are in the crazy part where our dft fails.
if cpressure[i] > p_inter:
actual_pressure.append(cpressure[i])
actual_density.append(1/mid_invn[i])
actual_pressure = np.array(actual_pressure)
actual_density = np.array(actual_density)
#print (kT, p_inter, 1/invnh, 1/invnc) #Use >> phase_diagram_data.dat (or phase_diagram_data-tensor.dat) to store data for reference
kT_data.append(kT) #holds all values of kT in a list
density_data.append(actual_density)
pressure_data.append(actual_pressure)
n_homogeneous_at_freezing = np.array(n_homogeneous_at_freezing)
n_crystal_at_freezing = np.array(n_crystal_at_freezing)
p_at_freezing = np.array(p_at_freezing)
plt.figure('T-vs-n at fixed P')
plt.fill_betweenx(kT_data, n_homogeneous_at_freezing, n_crystal_at_freezing, color='#eeeeee')
#Plot T vs n at constant P
#for p in [2,5,10,20]: #paper
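# For each fixed pressure P, scan the isotherms and linearly interpolate the
# density at which the pressure crosses P; collecting these points over all kT
# traces an isobar through the temperature-density plane.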
for p in [20, 60, 100, 200, 600, 1000, 2000, 4000, 6000]: #use for kT from 1 to 38 and 0.5
n_mid_at_p_list = []
kT_at_p_list = []
for i in range(0, len(kT_data)) : #number of temperatures kT
for j in range(0, len(density_data[i])-1) : #number of elements of n at some kT
if pressure_data[i][j] < p < pressure_data[i][j+1] :
phi = pressure_data[i][j+1]
plo = pressure_data[i][j]
nhi = density_data[i][j+1]
nlo = density_data[i][j]
n_mid_at_p_list.append((nlo*(phi - p) + nhi*(p - plo))/(phi - plo))
kT_at_p_list.append(kT_data[i])
plt.plot(n_mid_at_p_list, kT_at_p_list, '.-', label= 'P=%g' % p)
plt.title("Temperature vs Number Density at fixed Pressure")
plt.legend(loc='best')
plt.xlabel('Number Density')
plt.ylabel('Temperature')
# - OR - uncomment the plot you want
#Plot n vs T at constant P
#plt.plot(kT_at_p_list, n_mid_at_p_list, '.-', label= 'P=%g' % p)
#plt.title("Number Density vs Temperature at fixed Pressure")
#plt.legend(loc='best')
#plt.ylabel('Number Density')
#plt.xlabel('Temperature')
plt.figure('p-vs-n at fixed T')
plt.fill_betweenx(p_at_freezing, n_homogeneous_at_freezing, n_crystal_at_freezing, color='#eeeeee')
for i in range(len(kT_data)):
if kT_data[i] in [0.1, 0.2, 0.5, 1.0] or True:
#Plot P vs n at constant kT
plt.plot(density_data[i], pressure_data[i], label= 'kT=%g' % kT_data[i])
plt.title("Pressure vs Number Density at kT")
plt.legend(loc='best')
#plt.ylim(0, 26)
#plt.ylim(0, 500)
#plt.ylim(0, 45) #paper
plt.ylim(0, 6000) #use for kT from 1 to 38 and 0.5
#plt.xlim(0, 1.1)
#plt.xlim(0, 1.8)
#plt.xlim(0, 1.1) #paper
plt.xlim(0, 2.2) #use for kT from 1 to 38 and 0.5
plt.xlabel('Number Density')
plt.ylabel('Pressure')
plt.figure('p-vs-V at fixed T')
#Plot P vs 1/n (or V) at constant kT
plt.fill_betweenx(p_at_freezing, 1/n_homogeneous_at_freezing, 1/n_crystal_at_freezing, color='#eeeeee')
for i in range(len(kT_data)):
if kT_data[i] in [0.1, 0.2, 0.5, 1.0] or True:
plt.plot(1/density_data[i], pressure_data[i], label= 'kT=%g' % kT_data[i])
plt.title("Pressure vs volume at kT")
plt.legend(loc='best')
plt.ylim(0, 26)
plt.xlim(0.95, 1.6)
plt.xlabel('Volume per atom')
plt.ylabel('Pressure')
plt.figure('p-vs-T at fixed n')
#--------------NEW
#Plot P vs T at constant n
#for n in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]: #densities to show on the plot
#for n in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1, 1.05, 1.1]: #densities to show on the plot
#for n in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]: #densities to show on the plot - paper
for n in [0.7, 0.8, 0.9, 1, 2, 2.2]: #densities to show on the plot - use for kT from 1 to 38 and 0.5
p_mid_at_n_list = []
kT_at_n_list = []
for i in range(0, len(kT_data)) : #number of temperatures kT
for j in range(0, len(pressure_data[i])-1) : #number of elements of P at some kT
if density_data[i][j] < n < density_data[i][j+1] :
phi = pressure_data[i][j+1]
plo = pressure_data[i][j]
nhi = density_data[i][j+1]
nlo = density_data[i][j]
p_mid_at_n_list.append((plo*(nhi - n) + phi*(n - nlo))/(nhi - nlo))
kT_at_n_list.append(kT_data[i])
plt.plot(kT_at_n_list, p_mid_at_n_list, '.-', label= 'n=%g' % n)
plt.title("Pressure vs Temperature at fixed n")
plt.legend(loc='best')
plt.ylabel('Pressure')
plt.xlabel('Temperature')
# - OR - uncomment the plot you want
##Plot T vs P at constant n
#plt.plot(kT_at_n_list, p_mid_at_n_list, '.-', label= 'n=%g' % n)
#plt.title("Temperature vs Pressure at fixed n")
#plt.legend(loc='best')
#plt.xlabel('Pressure')
#plt.ylabel('Temperature')
#--------------end NEW
plt.figure('Phase Diagram of T vs n')
#Temperature vs Density Phase Diagram
plt.plot(n_homogeneous_at_freezing, kT_data, label='liquid', color='red')
plt.plot(n_crystal_at_freezing, kT_data, label='solid', color='blue')
#plt.fill_betweenx(kT_data, .1, n_homogeneous_at_freezing, color='red') #paper
plt.fill_betweenx(kT_data, .4, n_homogeneous_at_freezing, color='red') #use for kT from 1 to 38 and 0.5
plt.fill_betweenx(kT_data, n_homogeneous_at_freezing, n_crystal_at_freezing, color='gray')
#plt.fill_betweenx(kT_data, n_crystal_at_freezing, 1.6, color='blue')
#plt.fill_betweenx(kT_data, n_crystal_at_freezing, 1.8, color='blue') #paper
plt.fill_betweenx(kT_data, n_crystal_at_freezing, 2.14, color='blue') #use for kT from 1 to 38 and 0.5
plt.title("Temperature vs Number Density")
#plt.legend(loc='best')
plt.xlabel('Number Density')
plt.ylabel('Temperature')
##plt.plot([0.88, 0.90, 0.91, 0.92, 1.04, 1.12],[0.7, 0.8, 0.9, 1.0, 2.0, 3.0], label='chris_l', color='green')
##plt.plot([0.96, 0.98, 0.99, 1.00, 1.11, 1.19],[0.7, 0.8, 0.9, 1.0, 2.0, 3.0], label='chris_s', color='green')
#plt.plot([0.88, 0.90, 0.91, 0.92, 1.04, 1.12, 1.24, 1.44],[0.7, 0.8, 0.9, 1.0, 2.0, 3,5,10], label='chris_l', color='green')
#plt.plot([0.96, 0.98, 0.99, 1.00, 1.11, 1.19, 1.31, 1.51],[0.7, 0.8, 0.9, 1.0, 2.0, 3, 5, 10], label='chris_s', color='green')
plt.plot([0.88, 0.90, 0.91, 0.92, 1.04],[0.7, 0.8, 0.9, 1.0, 2.0], label='MC_l', color='green')
plt.plot([0.96, 0.98, 0.99, 1.00, 1.11],[0.7, 0.8, 0.9, 1.0, 2.0], label='MC_s', color='green')
plt.legend()
plt.figure('Phase Diagram of P vs T')
##Pressure vs Temperature Phase Diagram
plt.fill_between(kT_data, 0, p_at_freezing, color='red')
#plt.fill_between(kT_data, p_at_freezing, 50, color='blue') #FIX - change 30
plt.fill_between(kT_data, p_at_freezing, 6500, color='blue') #use for kT 1 to 38 and 0.5
#plt.fill_between(kT_data, p_at_freezing, 50, color='blue') #paper
plt.plot(kT_data, p_at_freezing, color='black')
#plt.ylim(0, 40)
#plt.xlim(kT_data.min(), kT_data.max()) #FIX!
plt.title("Pressure vs Temperature")
plt.xlabel('Temperature')
plt.ylabel('Pressure')
#plt.plot([0.7, 0.8,0.9,1.0,2.0,3.0], [6.24, 7.62, 8.78, 9.99, 25.5,43.8], label='chris_l', color='green')
##plt.plot([0.7, 0.8,0.9,1.0,2.0, 3, 5, 10], [6.24, 7.62, 8.78, 9.99, 25.5,43.8, 85.6, 210], label='chris_l', color='green')
plt.plot([0.7, 0.8,0.9,1.0,2.0], [6.24, 7.62, 8.78, 9.99, 25.5], label='MC', color='green')
plt.legend()
plt.show()
| gpl-2.0 |
ligz07/merlin | src/work_in_progress/srikanth/bottleneck_scripts/merge_data.py | 3 | 2870 |
import numpy, os, sys
import matplotlib.pyplot as plt
def load_binary_file(file_name, dimension):
fid_lab = open(file_name, 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
    frame_number = features.size // dimension
    features = features[:(dimension * frame_number)]
features = features.reshape((-1, dimension))
return features, frame_number
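# Minimal round-trip sketch (an added illustration, not used by the script):
# write a small float32 matrix with tofile() and load it back to check the
# (frames x dimension) reshape; the file name and helper name are made up.
def _demo_load_binary_file(tmp_name='/tmp/demo_feats.bin', dimension=3):
    data = numpy.arange(12, dtype=numpy.float32).reshape((4, dimension))
    data.tofile(tmp_name)
    features, frame_number = load_binary_file(tmp_name, dimension)
    assert frame_number == 4 and features.shape == (4, dimension)
    return features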
def read_file_list(dir_name):
file_paths = []
filenames = []
for root, directories, files in os.walk(dir_name):
for filename in files:
filepath = os.path.join(root, filename)
file_paths.append(filepath)
filenames.append(filename)
return file_paths, filenames
def generate_context_feature(in_data_dir1, in_data_dir2, out_data_dir, dimension1, dimension2):
if not os.path.exists(out_data_dir):
os.makedirs(out_data_dir)
file_paths, filenames = read_file_list(in_data_dir1)
context_features = numpy
i = 0
for file_path, filename in zip(file_paths, filenames):
features1, frame_number1 = load_binary_file(file_path, dimension1)
features2, frame_number2 = load_binary_file(os.path.join(in_data_dir2, filename), dimension2)
if frame_number1 != frame_number2:
print dimension2
print filename
print "%s %d != %d" %(filename, frame_number1, frame_number2)
print features1.shape, features2.shape
            sys.exit(1)
context_features = numpy.zeros((frame_number1, dimension1+dimension2))
context_features[0:frame_number1, 0:dimension1] = features1
context_features[0:frame_number2, dimension1:dimension1+dimension2] = features2
print filename, features1.shape, features2.shape, context_features.shape
context_filename = out_data_dir + '/' + filename
context_features = numpy.asarray(context_features, 'float32')
fid = open(context_filename, 'wb')
context_features.tofile(fid)
fid.close()
if __name__ == '__main__':
in_dir1 = '/afs/inf.ed.ac.uk/group/cstr/projects/phd/s1432486/work/Merlin/test_version/dnn_tts/experiments/acoustic_model/data/nn_no_silence_lab_norm_490'
in_dir2 = '/afs/inf.ed.ac.uk/group/cstr/projects/phd/s1432486/work/Merlin/test_version/dnn_tts/experiments/acoustic_model/gen/DNN_TANH_TANH_TANH_TANH_LINEAR__mgc_lf0_vuv_bap_1_200_490_259_4_512_512_hidden_stacked/'
dimension1 = 490
dimension2 = 32*21 # 128 * 1
out_dir = '/afs/inf.ed.ac.uk/group/cstr/projects/phd/s1432486/work/Merlin/test_version/dnn_tts/experiments/acoustic_model/data/nn_no_silence_lab_norm_1162'
generate_context_feature(in_dir1, in_dir2, out_dir, dimension1, dimension2)
| apache-2.0 |
BoltzmannBrain/nupic.fluent | tests/unit/utils/network_data_generator_test.py | 4 | 7356 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Tests for the NetworkDataGenerator class."""
import os
import pandas
import random
import unittest
from fluent.utils.network_data_generator import NetworkDataGenerator
from nupic.data.file_record_stream import FileRecordStream
try:
import simplejson as json
except:
import json
class NetworkDataGeneratorTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(NetworkDataGeneratorTest, self).__init__(*args, **kwargs)
self.expected = [[
{"_token": "get",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 1},
{"_token": "rid",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "of",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "the",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "trrible",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "kitchen",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0},
{"_token": "odor",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 0}],
[{"_token": "i",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 1},
{"_token": "don",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0},
{"_token": "t",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0},
{"_token": "care",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0}]]
self.dirName = os.path.dirname(os.path.realpath(__file__))
def assertRecordsEqual(self, actual, expected):
self.assertIsInstance(actual, list)
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(len(a), len(e))
for ra, re in zip(a, e):
self.assertDictEqual(ra, re)
def testSplitNoPreprocess(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
self.assertRecordsEqual(ndg.records, self.expected)
def testSplitPreprocess(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
expected = [[
{"_token": "gohbkchoo",
"_categories": "0 1",
"_sequenceID": 0,
"ID": "1",
"_reset": 1}],
[{"_token": "o",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 1},
{"_token": "ca",
"_categories": "2",
"_sequenceID": 1,
"ID": "2",
"_reset": 0}]]
ndg.split(filename, 3, True, ignoreCommon=100, correctSpell=True)
self.assertRecordsEqual(ndg.records, expected)
def testRandomize(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
random.seed(1)
ndg.randomizeData()
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
success = ndg.saveData(dataOutputFile, categoriesOutputFile)
randomizedIDs = []
dataTable = pandas.read_csv(dataOutputFile)
for _, values in dataTable.iterrows():
record = values.to_dict()
idx = record["_sequenceID"]
if idx.isdigit() and (not randomizedIDs or randomizedIDs[-1] != idx):
randomizedIDs.append(idx)
self.assertNotEqual(randomizedIDs, range(len(randomizedIDs)))
def testSaveData(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
success = ndg.saveData(dataOutputFile, categoriesOutputFile)
self.assertTrue(success)
dataTable = pandas.read_csv(dataOutputFile).fillna("")
types = {"_categories": "list",
"_token": "string",
"_sequenceID": "int",
"_reset": "int",
"ID": "string"}
specials = {"_categories": "C",
"_token": "",
"_sequenceID": "S",
"_reset": "R",
"ID": ""}
expected_records = [record for data in self.expected for record in data]
expected_records.insert(0, specials)
expected_records.insert(0, types)
for idx, values in dataTable.iterrows():
record = values.to_dict()
if idx > 1:
# csv values are strings, so cast the ints
record["_sequenceID"] = int(record["_sequenceID"])
record["_reset"] = int(record["_reset"])
self.assertDictEqual(record, expected_records[idx])
with open(categoriesOutputFile) as f:
categories = json.load(f)
expected_categories = {"kitchen": 0, "environment": 1, "not helpful": 2}
self.assertDictEqual(categories, expected_categories)
def testSaveDataIncorrectType(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.csv")
ndg.split(filename, 3, False)
with self.assertRaises(TypeError):
ndg.saveData(dataOutputFile, categoriesOutputFile)
def testFileRecordStreamReadData(self):
ndg = NetworkDataGenerator()
filename = os.path.join(self.dirName, "test_data/multi_sample.csv")
ndg.split(filename, 3, False)
dataOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_split.csv")
categoriesOutputFile = os.path.join(
self.dirName, "test_data/multi_sample_categories.json")
ndg.saveData(dataOutputFile, categoriesOutputFile)
# If no error is raised, then the data is in the correct format
frs = FileRecordStream(dataOutputFile)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
spurihwr/ImageProcessingProjects | Image_Recognition/PCABasedImageReco.py | 1 | 2899 | ####################################################################
# This code is a PCA based face recognition programme. It reads 5 faces per
# subject from the ORL database for training and uses the remaining 5 as the
# test set. The printed accuracy score shows the recognition performance.
#
# Download the ORL database from internet.
# This code was modified by Saurabh Puri in order to show the face
# recognition task
#######################################################################
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import numpy as np
import cv2
zz=1;
noc=40; #no_of_classes
nots=5; #no_of_training_set
#width and height is hardcoded but could be derived from the image itself
#sometimes for a better performance, cropping of image is required as PCA is generally very sensitive to variations in the image (like light, shadow, etc.)
w = 112
h = 92
#Split the dataset into training and test set
#Folder location: ./att_faces/s*/*.pgm
#First half images in each class is considered as training set and other half are considered to be test set
X_train = np.empty(w*h, dtype=np.float32)
y_train = np.empty(1, dtype=np.int32)
X_test = np.empty(w*h, dtype=np.float32)
y_test = np.empty(1, dtype=np.int32)
for i in range(1,noc+1):
for j in range(1,nots+1):
#print(str(i) +' '+ str(j))
file= "./att_faces/s" + str(i) + "/" + str(j) + ".pgm"
im = cv2.imread(file)
im = im.transpose((2,0,1))
im = np.expand_dims(im,axis=0)
imgray = im[0][0]
im1D = imgray.flatten('F')
X_train = np.vstack((X_train,im1D))
y_train = np.hstack((y_train,i-1))
for i in range(1,noc+1):
for j in range(nots+1,nots+6):
#print(str(i) +' '+ str(j))
file= "./att_faces/s" + str(i) + "/" + str(j) + ".pgm"
im = cv2.imread(file)
im = im.transpose((2,0,1))
im = np.expand_dims(im,axis=0)
imgray = im[0][0]
im1D = imgray.flatten('F')
X_test = np.vstack((X_test,im1D))
y_test = np.hstack((y_test,i-1))
#delete first row as it was empty
X_train = np.delete(X_train,(0),axis=0)
y_train = np.delete(y_train,(0),axis=0)
X_test = np.delete(X_test,(0),axis=0)
y_test = np.delete(y_test,(0),axis=0)
print('loaded')
#normalize to 0-1
X_train = X_train/255
X_test = X_test/255
# initiate PCA and fit to the training data
pca = PCA(n_components=40)
pca.fit(X_train)
# transform
X_transformed = pca.transform(X_train)
newdata_transformed = pca.transform(X_test)
#initiate a classifier and then fit eigen faces and labels
clf = SVC()
clf.fit(X_transformed,y_train)
# predict new labels using the trained classifier
pred_labels = clf.predict(newdata_transformed)
#output the accuracy_score
score = accuracy_score(y_test,pred_labels,True)
print(score)
##Print the predicted labels
#print(pred_labels)
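# Optional reconstruction sketch (an addition, not part of the original
# script): rebuild the first test face from its 40 PCA coefficients to see
# how much detail the eigenface basis keeps. The reshape assumes the same
# column-major ('F') flattening used when the images were loaded above.
reconstruction = pca.inverse_transform(newdata_transformed[:1])[0]
face_approx = (np.clip(reconstruction, 0, 1).reshape((w, h), order='F') * 255).astype(np.uint8)
print('reconstructed face shape: ' + str(face_approx.shape))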
| mit |
rbiswas4/SNsims | snsims/simulations.py | 1 | 15705 | #!/usr/bin/env python
from __future__ import absolute_import, print_function, division
from future.utils import with_metaclass
import abc
from opsimsummary import Tiling, HealpixTiles
from analyzeSN import LightCurve
from .universe import Universe
from .paramDistribution import SimpleSALTDist
import os
import numpy as np
import pandas as pd
from lsst.sims.photUtils import BandpassDict
from lsst.sims.catUtils.supernovae import SNObject
__all__ = ['SimulationTile', 'EntireSimulation', 'TiledSimulation']
simBandNameDict = dict((x, 'lsst' + x) for x in 'ugrizy')
class Photometry(object):
"""
Temporary class standing in for the Photometry class in AnalyzeSN which is
currently in a branch
"""
def __init__(self):
pass
@staticmethod
def pair_method(obsHistID, snid, maxObsHistID):
return snid * maxObsHistID + obsHistID
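    # Illustrative helper (an addition, not part of the original API): assuming
    # 0 <= obsHistID < maxObsHistID, the pairing above is inverted by divmod,
    # e.g. divmod(Photometry.pair_method(12, 3, 1000), 1000) == (3, 12),
    # i.e. (snid, obsHistID).
    @staticmethod
    def _unpair_method_sketch(diaID, maxObsHistID):
        snid, obsHistID = divmod(diaID, maxObsHistID)
        return obsHistID, snid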
class EntireSimulation(Universe):
"""
Simulation of a set of SN from a set of telescope pointings
and a set of SN. The simulation is perfectly reproducible if
both the pointings, paramsDF are the same (in terms of ordering)
Parameters
-----------
rng : instance of `numpy.random.RandomState`
pointings: instance of `pd.DataFrame`
dataFrame with a minimal set of columns
[`expMJD`, `filter`, `fiveSigmaDepth`]
paramsDF : `pd.DataFrame`
the minimal set of columns are
[`snid`, `x0`, `t0`, `x1` , `c` , `snra`, `sndec`]
Attributes
----------
randomState : `numpy.random.RandomState`
snParams : `pd.DataFrame`
"""
def __init__(self, rng, pointings, paramsDF, angularUnits='degrees',
maxObsHistID=None):
self.pointings = pointings
self._paramsDf = paramsDF
self._rng = rng
self.angularUnits = angularUnits
self.bandPasses = BandpassDict.loadTotalBandpassesFromFiles()
self.maxObsHistID = maxObsHistID
@property
def randomState(self):
return self._rng
@property
def snParams(self):
return self._paramsDf
@staticmethod
def getSNCosmoParamDict(odict, SNCosmoModel):
mydict = dict()
param_names = SNCosmoModel.param_names
for param in odict.index.values:
if param in param_names:
mydict[param] = odict[param]
return mydict
def SN(self, snid, timeRange='model'):
mySNParams = self.snParams.ix[snid]
if self.angularUnits == 'radians':
            myra = np.degrees(mySNParams.snra)
mydec = np.degrees(mySNParams.sndec)
elif self.angularUnits == 'degrees':
myra = mySNParams.snra
mydec = mySNParams.sndec
sn = SNObject(ra=myra, dec=mydec)
sncosmo_params = self.getSNCosmoParamDict(mySNParams, sn)
sn.set(**sncosmo_params)
return sn
def lc(self, snid, maxObsHistID=None):
if maxObsHistID is None:
maxObsHistID = self.maxObsHistID
sn = self.SN(snid, timeRange='model')
lcMinTime = sn.mintime()
lcMaxTime = sn.maxtime()
# lcMinTime = self.SN(snid, timeRange='model').mintime()
# lcMaxTime = self.SN(snid, timeRange='model').maxtime()
if lcMinTime is None or lcMaxTime is None:
df = self.pointings.copy()
else:
df = self.pointings.query('expMJD < @lcMaxTime and expMJD > @lcMinTime').copy().reset_index()
if maxObsHistID is None or ('obsHistID' in df.columns):
pass
else:
raise ValueError('Cannot index if obsHistID column not provided')
if maxObsHistID is not None:
idx = Photometry.pair_method(df.obsHistID.values,
snid,
maxObsHistID)
else:
idx = np.ones(len(df))
fluxerr = np.zeros(len(df))
modelFlux = np.zeros(len(df))
for i, rowtuple in enumerate(df.iterrows()):
row = rowtuple[1]
# print(row['expMJD'], row['filter'], row['fiveSigmaDepth'])
bp = self.bandPasses[row['filter']]
modelFlux[i] = self.staticModelFlux(sn, row['expMJD'],
bandpassobject=bp)
fluxerr[i] = sn.catsimBandFluxError(time=row['expMJD'],
bandpassobject=bp,
fluxinMaggies=modelFlux[i],
m5=row['fiveSigmaDepth'])
rng = self.randomState
df['fluxerr'] = fluxerr
deviations = rng.normal(size=len(df))
df['deviations'] = deviations
df['zp'] = 0.
df['ModelFlux'] = modelFlux
df['snid'] = snid
df['flux'] = df['ModelFlux'] + df['deviations'] * df['fluxerr']
df['zpsys']= 'ab'
df['diaID'] = idx
lc = df[['diaID', 'snid', 'expMJD', 'filter', 'ModelFlux', 'flux', 'fluxerr',
'zp', 'zpsys', 'fieldID']]
return LightCurve(lc)
@staticmethod
def staticModelFlux(sn, time, bandpassobject):
return sn.catsimBandFlux(bandpassobject=bandpassobject,
time=time)
def modelFlux(self, snid, time, bandpassobject):
# assert len(times) == len(bands)
# flux = np.zeros(len(times))
sn = self.SN(snid)
return self.staticModelFlux(sn, time=time, bandpassobject=bandpassobject)
# return self.SN(snid).catsimBandFlux(bandpassobject=bandpassobject,
# time=time)
def writeSNParams(self, paramFileName, IDVal=0):
"""
Write the dataframe `self.snParams` to a file
Parameters
----------
paramFileName : Instance of string
paramFileName
IDVal : integer
used as a key to write a group
"""
if paramFileName.endswith('.hdf'):
self.snParams.to_hdf(paramFileName, key='{}'.format(IDVal))
else:
            raise NotImplementedError('Only methods to write to hdf files '
                                      'implemented')
def writeSN(self, snid, fileName, IDVal=0, timeRange='model'):
"""
Write light curve of SN to disc
Parameters
----------
snid : int/string
SN id of SN
fileName : string, mandatory
timeRange : string, optional, defaults to model
time range over which the light curve is written to disk
"""
lc = self.lc(snid)
df = lc.lightCurve
df['band'] = df['band'].astype(str)
with pd.get_store(fileName) as store:
store.append('tile_{}'.format(IDVal), df)
class TiledSimulation(EntireSimulation):
def __init__(self,
paramDF,
NSIDE,
tileID,
hpOpSim,
rng=None,
allPointings=None,
timeRange=None):
"""
Parameters
----------
paramDF
"""
self.tileID = tileID
self._randomState = rng
if self._randomState is None:
self._randomState = np.random.RandomState(self.tileID)
self.Tiling = HealpixTiles(nside=NSIDE, preComputedMap=hpOpSim)
self.fieldArea = self.Tiling.area(self.tileID)
self.columns = ('expMJD', 'filter', 'fieldID', 'fiveSigmaDepth')
self.tilePointings = self.Tiling.pointingSequenceForTile(self.tileID,
allPointings=allPointings,
columns=self.columns)
super(TiledSimulation, self).__init__(rng=self._randomState,
pointings=self.tilePointings,
paramsDF=paramDF)
class SimulationTile(Universe):
def __init__(self,
paramDist,
rate,
NSIDE,
tileID,
hpOpSim,
allPointings=None,
timeRange=None,
angularUnits='radians'):
self.Tiling = HealpixTiles(nside=NSIDE, preComputedMap=hpOpSim)
self.tileID = tileID
self._randomState = np.random.RandomState(self.tileID)
self.fieldArea = self.Tiling.area(tileID)
self.zdist = rate(rng=self.randomState, fieldArea=self.fieldArea)
self.zsamples = self.zdist.zSamples
self.numSN = len(self.zsamples)
self.positions = self.Tiling.positions(self.tileID, self.numSN,
rng=self.randomState)
self._snParamTable = None
self.columns = ('expMJD', 'filter', 'fieldID', 'fiveSigmaDepth')
self.tilePointings = self.Tiling.pointingSequenceForTile(self.tileID,
allPointings=allPointings,
columns=self.columns)
self._timeRange = timeRange
self.bandPasses = BandpassDict.loadTotalBandpassesFromFiles()
@property
def minPeakTime(self):
if self._timeRange is None:
minTime = self.tilePointings.expMJD.min()
else:
minTime = self._timeRange[0]
return minTime
@property
def maxPeakTime(self):
if self._timeRange is None:
maxTime = self.tilePointings.expMJD.max()
else:
maxTime = self._timeRange[1]
return maxTime
@property
def snParamTable(self):
if self._snParamTable is None:
self.snParams()
return self._snParamTable
@property
def randomState(self):
if self._randomState is None:
self._randomState = np.random.RandomState(self.tileID)
return self._randomState
def snParams(self):
zsamples = self.zdist.zSamples
numSN = len(zsamples)
positions = self.Tiling.positions(self.tileID, numSN,
rng=self.randomState)
ra = self.positions[0]
dec = - self.positions[1] + 45.0
# Why do we need numSN
sp = SimpleSALTDist(numSN=numSN, rng=self.randomState,
mjdmin=self.minPeakTime,
zSamples=self.zsamples).paramSamples
sp['ra'] = self.positions[0]
sp['dec'] = self.positions[1]
sp['snid'] = self.tileID * 500.0 + np.arange(numSN)
sp.set_index('snid', inplace=True)
self._snParamTable = sp
# if self.minPeakTime is None or self.maxPeakTime is None:
# pass
# else:
# sp['t0'] = self.minPeakTime + \
# (self.maxPeakTime - self.minPeakTime) * sp['t0']
return sp
@staticmethod
def getSNCosmoParamDict(odict, SNCosmoModel):
mydict = dict()
param_names = SNCosmoModel.param_names
for param in odict.index.values:
if param in param_names:
mydict[param] = odict[param]
return mydict
def SN(self, snid, timeRange='model'):
mySNParams = self.snParamTable.ix[snid]
sn = SNObject(ra=mySNParams.ra, dec=mySNParams.dec)
sncosmo_params = self.getSNCosmoParamDict(mySNParams, sn)
sn.set(**sncosmo_params)
z = sn.get('z')
t0 = sn.get('t0')
# lcMinTime = t0 - 20. * (1.0 + z)
# lcMaxTime = t0 + 50. * (1.0 + z )
return sn
@staticmethod
def staticModelFlux(sn, time, bandpassobject):
return sn.catsimBandFlux(bandpassobject=bandpassobject,
time=time)
def modelFlux(self, snid, time, bandpassobject):
# assert len(times) == len(bands)
# flux = np.zeros(len(times))
# flux = np.asarray(list(self.SN(snid).catsimBandFlux(bandpassobject=self.bandPasses[bands[i]],
# time=times[i]) for i in range(len(times))))
#for i, band in enumerate(bands):
# bp = self.bandPasses[band]
# flux[i] = self.SN(snid).catsimBandFlux(bandpassobject=bp, time=times[i])
# print(len(flux), len(times))
return self.SN(snid).catsimBandFlux(bandpassobject=bandpassobject,
time=time)
def lc(self, snid):
sn = self.SN(snid, timeRange='model')
lcMinTime = sn.mintime()
lcMaxTime = sn.maxtime()
# lcMinTime = self.SN(snid, timeRange='model').mintime()
# lcMaxTime = self.SN(snid, timeRange='model').maxtime()
if lcMinTime is None or lcMaxTime is None:
df = self.tilePointings.copy()
else:
df = self.tilePointings.query('expMJD < @lcMaxTime and expMJD > @lcMinTime').copy()
df['snid'] = snid
fluxerr = np.zeros(len(df))
modelFlux = np.zeros(len(df))
for i, rowtuple in enumerate(df.iterrows()):
row = rowtuple[1]
# print(row['expMJD'], row['filter'], row['fiveSigmaDepth'])
bp = self.bandPasses[row['filter']]
modelFlux[i] = self.staticModelFlux(sn, row['expMJD'],
bandpassobject=bp)
fluxerr[i] = sn.catsimBandFluxError(time=row['expMJD'],
bandpassobject=bp,
# fluxinMaggies=row['ModelFlux'],
fluxinMaggies=modelFlux[i],
m5=row['fiveSigmaDepth'])
rng = self.randomState
df['fluxerr'] = fluxerr
deviations = rng.normal(size=len(df))
df['deviations'] = deviations
df['zp'] = 0.
df['ModelFlux'] = modelFlux
df['flux'] = df['ModelFlux'] + df['deviations'] * df['fluxerr']
df['zpsys']= 'ab'
lc = df[['snid', 'expMJD', 'filter', 'ModelFlux', 'flux', 'fluxerr',
'zp', 'zpsys', 'fieldID']]
return LightCurve(lc, bandNameDict=simBandNameDict)
def writeTile(self, fileName, timeRange='model', paramFileName=None):
"""
"""
count = 0
for snid in self.snParamTable.index.values:
self.writeSN(snid, fileName, timeRange=timeRange)
            # report progress every 50 SN written (skip the very first one)
            if count % 50 == 0 and count != 0:
                print('another 50', snid)
count += 1
if paramFileName is None:
filename_parts = fileName.split('.')
filename_parts[-2] += '_params'
paramFileName = '.'.join(filename_parts)
self.writeSNParams(paramFileName)
def writeSNParams(self, paramFileName):
if paramFileName.endswith('.hdf'):
self.snParamTable.to_hdf(paramFileName, key='{}'.format(self.tileID))
else:
            raise NotImplementedError('Only methods to write to hdf files '
                                      'implemented')
def writeSN(self, snid, fileName, timeRange='model'):
"""
Write light curve of SN to disc
Parameters
----------
snid : int/string
SN id of SN
fileName : string, mandatory
timeRange : string, optional, defaults to model
time range over which the light curve is written to disk
"""
lc = self.lc(snid)
df = lc.lightCurve
df['band'] = df['band'].astype(str)
with pd.get_store(fileName) as store:
store.append('tile_{}'.format(self.tileID), df)
| mit |
robintw/scikit-image | doc/examples/applications/plot_rank_filters.py | 14 | 18001 | """
============
Rank filters
============
Rank filters are non-linear filters using the local gray-level ordering to
compute the filtered value. This ensemble of filters share a common base: the
local gray-level histogram is computed on the neighborhood of a pixel (defined
by a 2-D structuring element). If the filtered value is taken as the middle
value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
e.g. image smoothing, sharpening
* image pre-processing
e.g. noise reduction, contrast enhancement
* feature extraction
e.g. border detection, isolated point detection
* post-processing
e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the `camera` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from skimage import data
noisy_image = img_as_ubyte(data.camera())
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of grey values')
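# Illustrative aside (an addition to the original example): for a single
# pixel a rank filter simply orders the gray levels inside the structuring
# element, and picking the middle value of that ordering gives the median
# filter. The check below uses a flat 3x3 neighbourhood around pixel (100, 100).
patch = noisy_image[99:102, 99:102]
assert np.median(patch) == np.sort(patch.ravel())[4]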
"""
.. image:: PLOT2RST.current_figure
Noise removal
=============
Some noise is added to the image, 1% of pixels are randomly set to 255, 1% are
randomly set to 0. The **median** filter is applied to remove the noise.
"""
from skimage.filters.rank import median
from skimage.morphology import disk
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Noisy image')
ax1.axis('off')
ax2.imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Median $r=1$')
ax2.axis('off')
ax3.imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax3.set_title('Median $r=5$')
ax3.axis('off')
ax4.imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax4.set_title('Median $r=20$')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The added noise is efficiently removed: since the image defects are small (1
pixel wide), a small filter radius is sufficient. As the radius increases,
objects of bigger size are filtered out as well, such as the camera tripod. The
median filter is often used for noise removal because borders are preserved and
e.g. salt and pepper noise typically does not distort the gray-level.
Image smoothing
================
The example hereunder shows how a local **mean** filter smooths the camera man
image.
"""
from skimage.filters.rank import mean
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
loc_mean = mean(noisy_image, disk(10))
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Local mean $r=10$')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
One may be interested in smoothing an image while preserving important borders
(median filters already achieved this), here we use the **bilateral** filter
that restricts the local neighborhood to pixel having a gray-level similar to
the central one.
.. note::
A different implementation is available for color images in
`skimage.filters.denoise_bilateral`.
"""
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(bilat, cmap=plt.cm.gray)
ax2.set_title('Bilateral mean')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
One can see that the large continuous part of the image (e.g. sky) is smoothed
whereas other details are preserved.
Contrast enhancement
====================
We compare here how the global histogram equalization is applied locally.
The equalized image [2]_ has a roughly linear cumulative distribution function
for each pixel neighborhood. The local version [3]_ of the histogram
equalization emphasizes every local gray-level variations.
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
.. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, ax = plt.subplots(3, 2, figsize=(10, 10))
ax1, ax2, ax3, ax4, ax5, ax6 = ax.ravel()
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of gray values')
ax3.imshow(glob, interpolation='nearest', cmap=plt.cm.gray)
ax3.axis('off')
ax4.plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax4.set_title('Histogram of gray values')
ax5.imshow(loc, interpolation='nearest', cmap=plt.cm.gray)
ax5.axis('off')
ax6.plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax6.set_title('Histogram of gray values')
"""
.. image:: PLOT2RST.current_figure
Another way to maximize the number of gray-levels used for an image is to apply
a local auto-leveling, i.e. the gray-value of a pixel is proportionally
remapped between local minimum and local maximum.
The following example shows how local auto-level enhances the camera man
picture.
"""
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(auto, cmap=plt.cm.gray)
ax2.set_title('Local autolevel')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
This filter is very sensitive to local outliers, see the little white spot in
the left part of the sky. This is due to a local maximum which is very high
comparing to the rest of the neighborhood. One can moderate this using the
percentile version of the auto-level filter which uses given percentiles (one
inferior, one superior) in place of local minimum and maximum. The example
below illustrates how the percentile parameters influence the local auto-level
result.
"""
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(np.hstack((image, loc_autolevel)), cmap=plt.cm.gray)
ax0.set_title('Original / auto-level')
ax1.imshow(
np.hstack((loc_perc_autolevel0, loc_perc_autolevel1)), vmin=0, vmax=255)
ax1.set_title('Percentile auto-level 0%,1%')
ax2.imshow(
np.hstack((loc_perc_autolevel2, loc_perc_autolevel3)), vmin=0, vmax=255)
ax2.set_title('Percentile auto-level 5% and 10%')
for ax in axes:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The morphological contrast enhancement filter replaces the central pixel by the
local maximum if the original pixel value is closer to the local maximum,
otherwise by the local minimum.
"""
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(enh, cmap=plt.cm.gray)
ax2.set_title('Local morphological contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The percentile version of the local morphological contrast enhancement uses
percentile *p0* and *p1* instead of the local minimum and maximum.
"""
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(penh, cmap=plt.cm.gray)
ax2.set_title('Local percentile morphological\n contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image threshold
===============
The Otsu threshold [4]_ method can be applied locally using the local gray-
level distribution. In the example below, for each pixel, an "optimal"
threshold is determined by maximizing the variance between two classes of
pixels of the local neighborhood defined by a structuring element.
The example compares the local threshold with the global threshold
`skimage.filters.threshold_otsu`.
.. note::
Local is much slower than global thresholding. A function for global Otsu
    thresholding can be found in `skimage.filters.threshold_otsu`.
.. [4] http://en.wikipedia.org/wiki/Otsu's_method
"""
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(p8, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Original')
ax1.axis('off')
fig.colorbar(ax2.imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
ax3.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax3.set_title('Original >= local Otsu')
ax3.axis('off')
ax4.imshow(glob_otsu, cmap=plt.cm.gray)
ax4.set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
ax4.axis('off')
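# Tiny sanity sketch (an addition): on a two-valued image the global Otsu
# threshold has to fall between the two gray levels.
bimodal = np.array([[10] * 10, [200] * 10] * 5, dtype=np.uint8)
assert 10 <= threshold_otsu(bimodal) < 200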
"""
.. image:: PLOT2RST.current_figure
The following example shows how local Otsu thresholding handles a global level
shift applied to a synthetic image.
"""
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(m)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(m >= t, interpolation='nearest')
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image morphology
================
Local maximum and local minimum are the base operators for gray-level
morphology.
.. note::
    `skimage.morphology.dilation` and `skimage.morphology.erosion` are
    equivalent filters (see below for comparison).
Here is an example of the classical morphological gray-level filters: opening,
closing and morphological gradient.
"""
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(closing, cmap=plt.cm.gray)
ax2.set_title('Gray-level closing')
ax2.axis('off')
ax3.imshow(opening, cmap=plt.cm.gray)
ax3.set_title('Gray-level opening')
ax3.axis('off')
ax4.imshow(grad, cmap=plt.cm.gray)
ax4.set_title('Morphological gradient')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Feature extraction
===================
Local histograms can be exploited to compute local entropy, which is related to
the local image complexity. Entropy is computed using the base-2 logarithm,
i.e. the filter returns the minimum number of bits needed to encode the local
gray-level distribution.
`skimage.rank.entropy` returns the local entropy computed on a given
structuring element. The following example applies this filter to the 8-bit
camera image.
.. note::
    To make better use of the available bit depth, the function returns 10x entropy for
8-bit images and 1000x entropy for 16-bit images.
"""
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.jet), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
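# Small sanity sketch (an addition): on a constant image every local
# gray-level distribution collapses to a single bin, so the local entropy is
# zero everywhere.
flat = np.full((32, 32), 128, dtype=np.uint8)
assert entropy(flat, disk(5)).max() == 0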
"""
.. image:: PLOT2RST.current_figure
Implementation
==============
The central part of the `skimage.rank` filters is built on a sliding window
that updates the local gray-level histogram. This approach limits the algorithm
complexity to O(n) where n is the number of image pixels. The complexity is
also limited with respect to the structuring element size.
In the following we compare the performance of different implementations
available in `skimage`.
"""
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
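# Usage sketch (an addition): a decorated callable returns a (result, ms)
# pair, which is how the timings below are collected.
@exec_and_timeit
def _demo_increment(x):
    return x + 1
_demo_result, _demo_ms = _demo_increment(1)
assert _demo_result == 2 and _demo_ms >= 0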
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
"""
Comparison between
* `filters.rank.maximum`
* `morphology.dilate`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
Comparison between:
* `filters.rank.median`
* `scipy.ndimage.percentile`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
"""
.. image:: PLOT2RST.current_figure
Comparison of outcome of the three methods:
"""
fig, ax = plt.subplots()
ax.imshow(np.hstack((rc, rndi)))
ax.set_title('filters.rank.median vs. scipy.ndimage.percentile')
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
mfjb/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 22 | 45265 | from itertools import product
import pickle
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
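# Small illustration (an addition, not one of the original tests): with the
# errstate guard above a zero distance maps to an infinite weight instead of
# emitting a divide-by-zero warning.
assert np.isposinf(_weight_func(np.array([0., 1.]))[0])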
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
# In this case it should rise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
# Puts three points of each label in the plane and performs a
# nearest neighbor query on points near the decision boundary.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
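    # For reference, the pairwise distances between these points are:
    # d(x0, x1) = 1.01, d(x1, x2) = sqrt(0.99**2 + 1**2) ~= 1.40716026,
    # d(x0, x2) = sqrt(5) ~= 2.23606798.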
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array = np.sort(dist_array)
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
# Test kneighbors et.al when query is not training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
# Test kneighbors et.al when query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_kneighbors_parallel():
X, y = datasets.make_classification(n_samples=10, n_features=2,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y_1 = clf.predict(X_test)
dist_1, ind_1 = clf.kneighbors(X_test)
A_1 = clf.kneighbors_graph(X_test, mode='distance').toarray()
for n_jobs in [-1, 2, 5]:
clf.set_params(n_jobs=n_jobs)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
A = clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y_1, y)
assert_array_almost_equal(dist_1, dist)
assert_array_equal(ind_1, ind)
assert_array_almost_equal(A_1, A)
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
myt00seven/svrg | bk/svrg_bn_bk/run.py | 1 | 2580 | import sys, os
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
import lasagne
from load_dataset import *
from deep import DeepAutoEncoder
from sparse_autoencoder import SparseAutoEncoder
def main():
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
input_var = T.matrix('inputs')
target_var = T.matrix('targets')
n_hidden = 500
# network = DeepAutoEncoder(784, [300, 2])
# network.finish_network()
# network = network.output_layer
# network = DeepAutoEncoder(784, [300, 150, 2])
# network.finish_network()
# network = network.output_layer
network = SparseAutoEncoder(784, n_hidden).output_layer
# methods = ['adam', 'momentum', 'nesterov_momentum', 'adagrad', 'rmsprop', 'custom_momentum']
# methods = ['custom_adam_0.01_0.9_0.999', 'adam']
# methods = ['adam_reg']
# methods = ['adam_reg_dummy']
# methods = ['adam_deep300-2-300_0.01']
# methods = ['adam_deep_test_tied']
# methods = ['adam_deep_test_batch_norm']
# methods = ['adam_deep_0.01']
methods = ['adam_sparse_5.0_not_denoising']
# methods = ['svrg_100.0m_300']
n_images = 10
for j in range(n_images):
plt.subplot(len(methods) + 1, n_images, j + 1)
#plt.axis('off')
plt.xticks([])
plt.yticks([])
if j == 0:
plt.ylabel('original', rotation='horizontal')
plt.imshow(X_train[j].reshape(28, 28), cmap='Greys')
for i, model in enumerate(methods):
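        # Restore the saved parameters for this model; the .npz is assumed to
        # hold the flat list of arrays produced by np.savez in layer order,
        # which is what set_all_param_values expects.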
with np.load('models/model_%s.npz' % model) as f:
param_values = [f['arr_%d' % j] for j in range(len(f.files))]
lasagne.layers.set_all_param_values(network, param_values)
test_prediction = lasagne.layers.get_output(network, deterministic=True)
for j in range(n_images):
plt.subplot(len(methods) + 1, n_images, n_images * (i+1) + j + 1)
#plt.axis('off')
plt.xticks([])
plt.yticks([])
if j == 0:
plt.ylabel(model, rotation='horizontal')
plt.imshow(lasagne.layers.get_output(network, X_train[j]).eval().reshape(28, 28), cmap='Greys')
# n_images = 10
# for i in range(n_images):
# plt.subplot(n_images, 2, 2 * i + 1)
# plt.axis('off')
# plt.imshow(X_train[i].reshape(28, 28), cmap='Greys')
# plt.subplot(n_images, 2, 2 * i + 2)
# plt.axis('off')
# plt.imshow(lasagne.layers.get_output(network, X_train[i]).eval().reshape(28, 28), cmap='Greys')
plt.show()
main()
| mit |
jmschrei/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
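    # PAVA pools adjacent violators to their mean: (7, 5) -> 6 and (9, 8, 7) -> 8.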
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.20/_downloads/05a8c286e50d34a5e99b57ecfcc17c63/plot_simulate_evoked_data.py | 1 | 2807 | """
==============================
Generate simulated evoked data
==============================
Use :func:`~mne.simulation.simulate_sparse_stc` to simulate evoked data.
"""
# Author: Daniel Strohmeier <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)
###############################################################################
# Load real data as templates
data_path = sample.data_path()
raw = mne.io.read_raw_fif(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg-proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'
fwd = mne.read_forward_solution(fwd_fname)
fwd = mne.pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = mne.read_cov(cov_fname)
info = mne.io.read_info(ave_fname)
label_names = ['Aud-lh', 'Aud-rh']
labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
for ln in label_names]
###############################################################################
# Generate source time courses from 2 dipoles and the correspond evoked data
times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
"""Function to generate random source time courses"""
return (50e-9 * np.sin(30. * times) *
np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01))
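# data_fun above returns a sinusoid (omega = 30 rad/s, i.e. ~4.8 Hz) of
# amplitude 50e-9 under a Gaussian envelope centred near 0.15 s, with a small
# random latency jitter on each call.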
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
random_state=42, labels=labels, data_fun=data_fun)
###############################################################################
# Generate noisy evoked data
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
nave = 100 # simulate average of 100 epochs
evoked = simulate_evoked(fwd, stc, info, cov, nave=nave, use_cps=True,
iir_filter=iir_filter)
###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
opacity=0.5, high_resolution=True)
plt.figure()
plt.psd(evoked.data[0])
evoked.plot(time_unit='s')
| bsd-3-clause |
Adityaparmar2903/pysb_optimize | robertson/robertson_optimize.py | 1 | 4501 | from pysb.examples.robertson import model
import numpy as np
from pysb.integrate import Solver
import scipy.optimize
from pyDOE import *
import os
import sys
import numdifftools as nd
from matplotlib import pyplot as plt
import pickle
from pysb.sensitivity import Sensitivity
method_list = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B',
'TNC', 'COBYLA','SLSQP', 'Newton-CG', 'trust-ncg', 'dogleg', 'differential evolution']
num_timepoints = 101
num_dim = 3
t = np.linspace(0, 200, num_timepoints)
num_obj_calls = 0
data = np.zeros((num_timepoints, len(model.observables)))
p_to_fit = [p for p in model.parameters if p.name[0] == 'k']
p_to_fit_indices = [model.parameters.index(p) for p in p_to_fit]
nominal_values = np.array([p.value for p in p_to_fit])
x_test = np.log10(nominal_values)
sol = Solver(model, t, use_analytic_jacobian=True, nsteps=10000)
sol.run()
#sens = Sensitivity(model, t)
plt.ion()
def gen_synth_data(model, t):
for obs_ix, obs in enumerate(model.observables):
rand_norm = np.random.randn(len(t))
sigma = 0.1
obs_max = np.max(sol.yobs[obs.name])
noise = rand_norm * sigma * sol.yobs[obs.name]
noisy_obs = noise + sol.yobs[obs.name]
norm_noisy_data = noisy_obs / obs_max
data[:, obs_ix] = noisy_obs
def obj_func(x):
global num_obj_calls
num_obj_calls += 1
p = x - x_test
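    # p is the deviation from the nominal parameters in log10 units; deviations
    # beyond +/- 2 decades are penalized further below.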
lin_x = 10 ** x
for p_ix, pp in enumerate(p_to_fit):
pp.value = lin_x[p_ix]
sol.run()
total_err = 0
for obs_ix, obs in enumerate(model.observables):
y = sol.yobs[obs.name]
total_err += np.sum((y - data[:, obs_ix])**2)
try:
total_err += np.sum(p[np.where(p > 2)] - 2)*1000
total_err += np.sum(-2 - p[np.where(p < -2)])*1000
except Exception as e:
print "couldn't apply constraints"
print total_err
return total_err
def Jacob(x):
if np.any(np.isnan(x)):
jaco = np.zeros(x.shape)
jaco[:] = np.nan
return jaco
jaco = nd.Jacobian(obj_func)(x)
return jaco[0]
def Hessi(x):
if np.any(np.isnan(x)):
hes = np.zeros((len(x), len(x)))
hes[:] = np.nan
return hes
hes = nd.Hessian(obj_func)(x)
return hes
def jac_func(x):
#global num_jac_calls
#num_jac_calls += 1
lin_x = 10 ** x
# Initialize the model to have the values in the parameter array
for p_ix, p in enumerate(p_to_fit):
p.value = lin_x[p_ix]
sens.run()
dgdp = np.zeros(len(model.parameters))
for obs_ix, obs in enumerate(model.observables):
yobs = sens.yobs[obs.name]
ysens = sens.yobs_sens[obs.name]
y = data[:, obs_ix]
dgdy = np.tensordot(ysens, 2 * (yobs - y), axes=(0, 0))
dgdp += dgdy
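    # Chain rule for the log10-scaled parameters: dg/d(log10 k) = dg/dk * k * ln(10).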
dgdp[0:3] = dgdp[0:3] * np.log(10) * lin_x
return dgdp[0:3]
def hess_func(x):
jaco = nd.Jacobian(jac_func)(x)
return jaco
#gen_synth_data(model, t)
data = np.load('data.npy')
if __name__ == '__main__':
if len(sys.argv) < 4:
print 'Not enough input arguments.'
sys.exit()
from_idx = int(sys.argv[1])
to_idx = int(sys.argv[2])
if to_idx < from_idx:
print 'Invalid from-to pair.'
sys.exit()
method_id = int(sys.argv[3])
if method_id >= len(method_list):
print 'Invalid method id.'
sys.exit()
meth = method_list[method_id]
ini_val = np.load('initial_values.npy')
if method_id == 11:
for i in range(from_idx, to_idx):
result = scipy.optimize.differential_evolution(obj_func, [(-3.39, 0.61),(5.47, 9.47),(2,6)])
fname = 'Rob-%s_%d.pkl' % (method_id,i)
with open(fname, 'wb') as fh:
pickle.dump(result, fh)
            func_eval = num_obj_calls
            num_obj_calls = 0  # reset counter (module scope, no global needed)
fname = 'Rob-eval-%s_%d.pkl' % (method_id, i)
with open(fname, 'wb') as fh:
pickle.dump(func_eval, fh)
else:
for i in range(from_idx, to_idx):
result = scipy.optimize.minimize(obj_func, ini_val[i], method=meth, jac=Jacob, hess=Hessi)
fname = 'Rob-%s_%d.pkl' % (method_id,i)
with open(fname, 'wb') as fh:
pickle.dump(result, fh)
            func_eval = num_obj_calls
            num_obj_calls = 0  # reset counter (module scope, no global needed)
fname = 'Rob-eval-%s_%d.pkl' % (method_id, i)
with open(fname, 'wb') as fh:
pickle.dump(func_eval, fh)
| mit |
cactusbin/nyt | matplotlib/examples/pylab_examples/image_interp.py | 6 | 1925 | #!/usr/bin/env python
"""
The same (small) array, interpolated with three different
interpolation methods.
The center of the pixel at A[i,j] is plotted at i+0.5, j+0.5. If you
are using interpolation='nearest', the region bounded by (i,j) and
(i+1,j+1) will have the same color. If you are using interpolation,
the pixel center will have the same color as it does with nearest, but
other pixels will be interpolated between the neighboring pixels.
Earlier versions of matplotlib (<0.63) tried to hide the edge effects
from you by setting the view limits so that they would not be visible.
A recent bugfix in antigrain, and a new implementation in the
matplotlib._image module which takes advantage of this fix, no longer
makes this necessary. To prevent edge effects, when doing
interpolation, the matplotlib._image module now pads the input array
with identical pixels around the edge. e.g., if you have a 5x5 array
with colors a-y as below
a b c d e
f g h i j
k l m n o
p q r s t
u v w x y
the _image module creates the padded array,
a a b c d e e
a a b c d e e
f f g h i j j
k k l m n o o
p p q r s t t
u u v w x y y
u u v w x y y
does the interpolation/resizing, and then extracts the central region.
This allows you to plot the full range of your array w/o edge effects,
and for example to layer multiple images of different sizes over one
another with different interpolation methods - see
examples/layer_images.py. It also implies a performance hit, as this
new temporary, padded array must be created. Sophisticated
interpolation also implies a performance hit, so if you need maximal
performance or have very large images, interpolation='nearest' is
suggested.
"""
from pylab import *
A = rand(5,5)
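# Illustrative sketch (not part of the original example): the edge padding
# described in the docstring can be reproduced with numpy.pad in 'edge' mode,
# which replicates the border pixels of the 5x5 array into a 7x7 array.
# The A_padded name exists only for this demonstration.
import numpy as np
A_padded = np.pad(A, pad_width=1, mode='edge')
assert A_padded.shape == (7, 7)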
figure(1)
imshow(A, interpolation='nearest')
grid(True)
figure(2)
imshow(A, interpolation='bilinear')
grid(True)
figure(3)
imshow(A, interpolation='bicubic')
grid(True)
show()
| unlicense |
sinhrks/pandas-ml | pandas_ml/confusion_matrix/abstract.py | 3 | 16323 | #!/usr/bin/python
# -*- coding: utf8 -*-
import numpy as np
import pandas as pd
import collections
import pandas_ml as pdml
from pandas_ml.confusion_matrix.stats import binom_interval, class_agreement, prop_test
class ConfusionMatrixAbstract(object):
"""
Abstract class for confusion matrix
You shouldn't instantiate this class.
You might instantiate ConfusionMatrix or BinaryConfusionMatrix classes
"""
TRUE_NAME = 'Actual'
PRED_NAME = 'Predicted'
def __init__(self, y_true, y_pred, labels=None,
display_sum=True, backend='matplotlib',
true_name='Actual', pred_name='Predicted'):
self.true_name = true_name
self.pred_name = pred_name
if isinstance(y_true, pd.Series):
self._y_true = y_true
self._y_true.name = self.true_name
else:
self._y_true = pd.Series(y_true, name=self.true_name)
if isinstance(y_pred, pd.Series):
self._y_pred = y_pred
self._y_pred.name = self.pred_name
else:
self._y_pred = pd.Series(y_pred, name=self.pred_name)
if labels is not None:
if not self.is_binary:
self._y_true = self._y_true.map(lambda i: self._label(i, labels))
self._y_pred = self._y_pred.map(lambda i: self._label(i, labels))
else:
N = len(labels)
assert len(labels) == 2, "labels be a list with length=2 - length=%d" % N
d = {labels[0]: False, labels[1]: True}
self._y_true = self._y_true.map(d)
self._y_pred = self._y_pred.map(d)
raise(NotImplementedError) # ToDo: see self.classes and BinaryConfusionMatrix.__class ...
N_true = len(y_true)
N_pred = len(y_pred)
assert N_true == N_pred, \
"y_true must have same size - %d != %d" % (N_true, N_pred)
df = pd.crosstab(self._y_true, self._y_pred)
idx = self._classes(df)
if self.is_binary and pdml.compat._PANDAS_ge_021:
df = df.reindex([False, True])
df = df.reindex([False, True], axis=1)
df = df.fillna(0)
else:
df = df.loc[idx, idx.copy()].fillna(0) # if some columns or rows are missing
self._df_confusion = df
self._df_confusion.index.name = self.true_name
self._df_confusion.columns.name = self.pred_name
self._df_confusion = self._df_confusion.astype(np.int64)
self._len = len(idx)
self.backend = backend
self.display_sum = display_sum
def _label(self, i, labels):
try:
return(labels[i])
except IndexError:
return(i)
def __repr__(self):
return(self.to_dataframe(calc_sum=self.display_sum).__repr__())
def __str__(self):
return(self.to_dataframe(calc_sum=self.display_sum).__str__())
# return("%s:\n%s" % (self.title, self.to_dataframe(calc_sum=self.display_sum).__str__()))
@property
def classes(self):
"""
Returns classes (property)
"""
return(self._classes())
def _classes(self, df=None):
"""
Returns classes (method)
"""
if df is None:
df = self.to_dataframe()
idx_classes = (df.columns | df.index).copy()
idx_classes.name = 'Classes'
return(idx_classes)
def to_dataframe(self, normalized=False, calc_sum=False,
sum_label='__all__'):
"""
Returns a Pandas DataFrame
"""
if normalized:
a = self._df_confusion.values.astype('float')
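            # normalize each row (actual class) by its row total so that
            # every row of the normalized matrix sums to 1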
a = a.astype('float') / a.sum(axis=1)[:, np.newaxis]
df = pd.DataFrame(a,
index=self._df_confusion.index.copy(),
columns=self._df_confusion.columns.copy())
else:
df = self._df_confusion
if calc_sum:
df = df.copy()
df[sum_label] = df.sum(axis=1)
# df = pd.concat([df, pd.DataFrame(df.sum(axis=1), columns=[sum_label])], axis=1)
df = pd.concat([df, pd.DataFrame(df.sum(axis=0), columns=[sum_label]).T])
df.index.name = self.true_name
return(df)
@property
def true(self):
"""
Returns sum of actual (true) values for each class
"""
s = self.to_dataframe().sum(axis=1)
s.name = self.true_name
return(s)
@property
def pred(self):
"""
Returns sum of predicted values for each class
"""
s = self.to_dataframe().sum(axis=0)
s.name = self.pred_name
return(s)
def to_array(self, normalized=False, sum=False):
"""
Returns a Numpy Array
"""
return(self.to_dataframe(normalized, sum).values)
def toarray(self, *args, **kwargs):
"""
see to_array
"""
return(self.to_array(*args, **kwargs))
def len(self):
"""
Returns len of a confusion matrix.
For example: 3 means that this is a 3x3 (3 rows, 3 columns) matrix
"""
return(self._len)
def sum(self):
"""
Returns sum of a confusion matrix.
Also called "population"
It should be the number of elements of either y_true or y_pred
"""
return(self.to_dataframe().sum().sum())
@property
def population(self):
"""
see also sum
"""
return(self.sum())
def y_true(self, func=None):
if func is None:
return(self._y_true)
else:
return(self._y_true.map(func))
def y_pred(self, func=None):
if func is None:
return(self._y_pred)
else:
return(self._y_pred.map(func))
@property
def title(self):
"""
Returns title
"""
if self.is_binary:
return("Binary confusion matrix")
else:
return("Confusion matrix")
def plot(self, normalized=False, backend='matplotlib',
ax=None, max_colors=10, **kwargs):
"""
Plots confusion matrix
"""
df = self.to_dataframe(normalized)
try:
cmap = kwargs['cmap']
except KeyError:
import matplotlib.pyplot as plt
cmap = plt.cm.gray_r
title = self.title
if normalized:
title += " (normalized)"
if backend == 'matplotlib':
import matplotlib.pyplot as plt
# if ax is None:
fig, ax = plt.subplots(figsize=(9, 8))
plt.imshow(df, cmap=cmap, interpolation='nearest') # imshow / matshow
ax.set_title(title)
tick_marks_col = np.arange(len(df.columns))
tick_marks_idx = tick_marks_col.copy()
ax.set_yticks(tick_marks_idx)
ax.set_xticks(tick_marks_col)
ax.set_xticklabels(df.columns, rotation=45, ha='right')
ax.set_yticklabels(df.index)
ax.set_ylabel(df.index.name)
ax.set_xlabel(df.columns.name)
# N_min = 0
N_max = self.max()
if N_max > max_colors:
# Continuous colorbar
plt.colorbar()
else:
# Discrete colorbar
pass
# ax2 = fig.add_axes([0.93, 0.1, 0.03, 0.8])
# bounds = np.arange(N_min, N_max + 2, 1)
# norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# cb = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds, format='%1i')
return ax
elif backend == 'seaborn':
import seaborn as sns
ax = sns.heatmap(df, **kwargs)
return ax
# You should test this yourself
# because I'm facing an issue with Seaborn under Mac OS X (2015-04-26)
# RuntimeError: Cannot get window extent w/o renderer
# sns.plt.show()
else:
msg = "'backend' must be either 'matplotlib' or 'seaborn'"
raise ValueError(msg)
def binarize(self, select):
"""Returns a binary confusion matrix from
a confusion matrix"""
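        # one-vs-rest view: an observation counts as "positive" when its
        # label is contained in `select`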
if not isinstance(select, collections.Iterable):
select = np.array(select)
y_true_bin = self.y_true().map(lambda x: x in select)
y_pred_bin = self.y_pred().map(lambda x: x in select)
from pandas_ml.confusion_matrix.bcm import BinaryConfusionMatrix
binary_cm = BinaryConfusionMatrix(y_true_bin, y_pred_bin)
return(binary_cm)
def enlarge(self, select):
"""
Enlarges confusion matrix with new classes
It should add empty rows and columns
"""
if not isinstance(select, collections.Iterable):
idx_new_cls = pd.Index([select])
else:
idx_new_cls = pd.Index(select)
new_idx = self._df_confusion.index | idx_new_cls
new_idx.name = self.true_name
new_col = self._df_confusion.columns | idx_new_cls
new_col.name = self.pred_name
print(new_col)
self._df_confusion = self._df_confusion.loc[:, new_col]
# self._df_confusion = self._df_confusion.loc[new_idx, new_col].fillna(0)
# ToFix: KeyError: 'the label [True] is not in the [index]'
@property
def stats_overall(self):
"""
Returns an OrderedDict with overall statistics
"""
df = self._df_confusion
d_stats = collections.OrderedDict()
d_class_agreement = class_agreement(df)
key = 'Accuracy'
try:
d_stats[key] = d_class_agreement['diag'] # 0.35
except KeyError:
d_stats[key] = np.nan
key = '95% CI'
try:
d_stats[key] = binom_interval(np.sum(np.diag(df)), df.sum().sum()) # (0.1539, 0.5922)
except: # noqa
d_stats[key] = np.nan
d_prop_test = prop_test(df)
d_stats['No Information Rate'] = 'ToDo' # 0.8
d_stats['P-Value [Acc > NIR]'] = d_prop_test['p.value'] # 1
d_stats['Kappa'] = d_class_agreement['kappa'] # 0.078
d_stats['Mcnemar\'s Test P-Value'] = 'ToDo' # np.nan
return(d_stats)
@property
def stats_class(self):
"""
Returns a DataFrame with class statistics
"""
# stats = ['TN', 'FP', 'FN', 'TP']
# df = pd.DataFrame(columns=self.classes, index=stats)
df = pd.DataFrame(columns=self.classes)
# ToDo Avoid these for loops
for cls in self.classes:
binary_cm = self.binarize(cls)
binary_cm_stats = binary_cm.stats()
for key, value in binary_cm_stats.items():
df.loc[key, cls] = value # binary_cm_stats
d_name = {
'population': 'Population',
'P': 'P: Condition positive',
'N': 'N: Condition negative',
'PositiveTest': 'Test outcome positive',
'NegativeTest': 'Test outcome negative',
'TP': 'TP: True Positive',
'TN': 'TN: True Negative',
'FP': 'FP: False Positive',
'FN': 'FN: False Negative',
'TPR': 'TPR: (Sensitivity, hit rate, recall)', # True Positive Rate
'TNR': 'TNR=SPC: (Specificity)', # True Negative Rate
'PPV': 'PPV: Pos Pred Value (Precision)',
'NPV': 'NPV: Neg Pred Value',
'prevalence': 'Prevalence',
# 'xxx': 'xxx: Detection Rate',
# 'xxx': 'xxx: Detection Prevalence',
# 'xxx': 'xxx: Balanced Accuracy',
'FPR': 'FPR: False-out',
'FDR': 'FDR: False Discovery Rate',
'FNR': 'FNR: Miss Rate',
'ACC': 'ACC: Accuracy',
'F1_score': 'F1 score',
'MCC': 'MCC: Matthews correlation coefficient',
'informedness': 'Informedness',
'markedness': 'Markedness',
'LRP': 'LR+: Positive likelihood ratio',
'LRN': 'LR-: Negative likelihood ratio',
'DOR': 'DOR: Diagnostic odds ratio',
'FOR': 'FOR: False omission rate',
}
df.index = df.index.map(lambda id: self._name_from_dict(id, d_name))
return(df)
def stats(self, lst_stats=None):
"""
Return an OrderedDict with statistics
"""
d_stats = collections.OrderedDict()
d_stats['cm'] = self
d_stats['overall'] = self.stats_overall
d_stats['class'] = self.stats_class
return(d_stats)
def _name_from_dict(self, key, d_name):
"""
Returns name (value in dict d_name
or key if key doesn't exists in d_name)
"""
try:
return(d_name[key])
except (KeyError, TypeError):
return(key)
def _str_dict(self, d, line_feed_key_val='\n',
line_feed_stats='\n\n', d_name=None):
"""
Return a string representation of a dictionary
"""
s = ""
for i, (key, val) in enumerate(d.items()):
name = self._name_from_dict(key, d_name)
if i != 0:
s = s + line_feed_stats
s = s + "%s:%s%s" % (name, line_feed_key_val, val)
return(s)
def _str_stats(self, lst_stats=None):
"""
Returns a string representation of statistics
"""
d_stats_name = {
"cm": "Confusion Matrix",
"overall": "Overall Statistics",
"class": "Class Statistics",
}
stats = self.stats(lst_stats)
d_stats_str = collections.OrderedDict([
("cm", str(stats['cm'])),
("overall", self._str_dict(
stats['overall'],
line_feed_key_val=' ', line_feed_stats='\n')),
("class", str(stats['class'])),
])
s = self._str_dict(
d_stats_str, line_feed_key_val='\n\n',
line_feed_stats='\n\n\n', d_name=d_stats_name)
return(s)
def print_stats(self, lst_stats=None):
"""
Prints statistics
"""
print(self._str_stats(lst_stats))
def get(self, actual=None, predicted=None):
"""
Get confusion matrix value for a given
actual class and a given predicted class
if only one parameter is given (actual or predicted)
we get confusion matrix value for actual=actual and predicted=actual
"""
if actual is None:
actual = predicted
if predicted is None:
predicted = actual
return(self.to_dataframe().loc[actual, predicted])
def max(self):
"""
Returns max value of confusion matrix
"""
return(self.to_dataframe().max().max())
def min(self):
"""
Returns min value of confusion matrix
"""
return(self.to_dataframe().min().min())
@property
def is_binary(self):
"""Return False"""
return(False)
@property
def classification_report(self):
"""
Returns a DataFrame with classification report
"""
columns = np.array(['precision', 'recall', 'F1_score', 'support'])
index = self.classes
df = pd.DataFrame(index=index, columns=columns)
for cls in self.classes:
binary_cm = self.binarize(cls)
for stat in columns:
df.loc[cls, stat] = getattr(binary_cm, stat)
total_support = df.support.sum()
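        # the __avg / total__ row is the support-weighted average of each
        # statistic across classes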
df.loc['__avg / total__', :] = (df[df.columns[:-1]].transpose() * df.support).sum(axis=1) / df.support.sum()
df.loc['__avg / total__', 'support'] = total_support
return(df)
def _avg_stat(self, stat):
"""
Binarizes confusion matrix
and returns (weighted) average statistics
"""
s_values = pd.Series(index=self.classes)
for cls in self.classes:
binary_cm = self.binarize(cls)
v = getattr(binary_cm, stat)
print(v)
s_values[cls] = v
value = (s_values * self.true).sum() / self.population
return(value)
| bsd-3-clause |
rxa254/VibrationsAndWaves | Simulations/TravelingWave.py | 1 | 2291 | #!/usr/bin/env python
from __future__ import division
import matplotlib as mpl
#mpl.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
import matplotlib.animation as animation
mpl.rcParams.update({'text.usetex': True,
'lines.linewidth': 2.5,
'font.size': 20,
'xtick.labelsize': 'small',
'ytick.labelsize': 'small',
'axes.grid': True,
'axes.labelsize': 'medium',
'grid.alpha': 0.73,
'lines.markersize': 12,
'legend.borderpad': 0.2,
'legend.fancybox': True,
'legend.fontsize': 13,
'legend.framealpha': 0.7,
'legend.handletextpad': 0.1,
'legend.labelspacing': 0.2,
'legend.loc': 'best',
'savefig.dpi': 100,
'pdf.compression': 9})
n = 1
L = 1
omega = 2*np.pi*1
# Set up formatting for the movie files
Writer = animation.writers['ffmpeg']
writer = Writer(fps=25, metadata=dict(artist='rxa254'), bitrate=1800)
fig, ax = plt.subplots(2,1,sharex=True, figsize=(9, 12))
x = np.arange(0, 2*np.pi, 0.1)
line0, = ax[0].plot(x, np.sin(n*np.pi*x/L),
marker = 'o', c='xkcd:Blue', mfc='xkcd:Tomato', alpha=0.7)
line1, = ax[1].plot(x, np.sin(n*np.pi*x/L),
marker = 'o', c='xkcd:Purple', mfc='xkcd:Green', alpha=0.7)
ax[0].set_title(r'$y = sin(\pi x / L - \omega t)$', fontsize=20)
ax[1].set_title(r'$y = sin(\pi x / L + \omega t)$', fontsize=20)
plt.xlabel('x/L')
def animate(t):
line0.set_ydata(np.sin(n*np.pi*x/L - omega*t)) # update the data
line1.set_ydata(np.sin(n*np.pi*x/L + omega*t)) # update the data
return line0,line1,
# Init only required for blitting to give a clean slate.
def init():
line0.set_ydata(np.ma.array(x, mask=True))
line1.set_ydata(np.ma.array(x, mask=True))
return line0,line1,
ani = animation.FuncAnimation(fig, animate, np.arange(0, 2, 0.003), init_func=init,
interval=15, blit=True)
ani.save('travelling_wave.mp4', writer=writer)
#plt.show()
#plt.savefig("modes_of_string.pdf", bbox_inches='tight')
| mit |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/datasets/samples_generator.py | 2 | 40584 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe
# License: BSD 3 clause
from itertools import product
import numpy as np
from scipy import linalg
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
dupplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined in order to add covariance. The clusters
are then placed on the vertices of the hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=2)
The number of dupplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float or None, optional (default=0.0)
Shift all features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float or None, optional (default=1.0)
Multiply all features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must "
                         "be smaller than or equal to 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
n_samples_per_cluster = []
for k in xrange(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in xrange(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples)
# Build the polytope
C = np.array(list(product([-class_sep, class_sep], repeat=n_informative)))
if not hypercube:
for k in xrange(n_clusters):
C[k, :] *= generator.rand()
for f in xrange(n_informative):
C[:, f] *= generator.rand()
generator.shuffle(C)
# Loop over all clusters
pos = 0
pos_end = 0
for k in xrange(n_clusters):
# Number of samples in cluster k
n_samples_k = n_samples_per_cluster[k]
# Define the range of samples
pos = pos_end
pos_end = pos + n_samples_k
# Assign labels
y[pos:pos_end] = k % n_classes
# Draw features at random
X[pos:pos_end, :n_informative] = generator.randn(n_samples_k,
n_informative)
# Multiply by a random matrix to create co-variance of the features
A = 2 * generator.rand(n_informative, n_informative) - 1
X[pos:pos_end, :n_informative] = np.dot(X[pos:pos_end, :n_informative],
A)
        # Shift the cluster to a vertex
X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1))
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless)
# Randomly flip labels
if flip_y >= 0.0:
for i in xrange(n_samples):
if generator.rand() < flip_y:
y[i] = generator.randint(n_classes)
# Randomly shift and scale
constant_shift = shift is not None
constant_scale = scale is not None
for f in xrange(n_features):
if not constant_shift:
shift = (2 * generator.rand() - 1) * class_sep
if not constant_scale:
scale = 1 + 100 * generator.rand()
X[:, f] += shift
X[:, f] *= scale
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
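# Illustrative usage sketch for make_classification; `_demo_make_classification`
# is a demonstration-only helper with arbitrary example parameters and is not
# called anywhere in this module.
def _demo_make_classification():
    X, y = make_classification(n_samples=200, n_features=10, n_informative=4,
                               n_redundant=2, n_classes=3, random_state=0)
    assert X.shape == (200, 10)
    assert set(np.unique(y)) == {0, 1, 2}
    return X, y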
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50,
allow_unlabeled=True, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : list of tuples
The label sets.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
if not c in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
return np.array(X, dtype=np.float64), Y
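# Illustrative usage sketch; `_demo_make_multilabel` is a demonstration-only
# helper with arbitrary example parameters.
def _demo_make_multilabel():
    X, Y = make_multilabel_classification(n_samples=5, n_features=20,
                                          n_classes=4, n_labels=2,
                                          random_state=0)
    # X holds word counts per sample; Y is a sequence of label lists, one per
    # sample, possibly empty when allow_unlabeled is True.
    assert X.shape == (5, 20)
    assert len(Y) == 5
    return X, Y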
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
        y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
    References
    ----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
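# Numerical sketch of the target rule above: 9.34 is close to the median of a
# chi-squared distribution with 10 degrees of freedom, so roughly half of the
# labels come out as +1. Demonstration-only helper.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=2000, random_state=0)
    # expected to be near 0.5; the exact value depends on the random draw
    return np.mean(y == 1.0)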
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = range(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
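# Sketch of recovering the ground-truth coefficients with coef=True;
# `_demo_make_regression` is a demonstration-only helper with arbitrary values.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=8, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    # with zero noise and zero bias, X.dot(w) reproduces y exactly
    assert np.allclose(np.dot(X, w), y)
    return X, y, w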
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2D.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
n_samples_out = int(n_samples / float(1 + factor))
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
n_samples_out, n_samples_in = n_samples_out + 1, n_samples_in + 1
outer_circ_x = np.cos(np.linspace(0, 2 * np.pi, n_samples_out)[:-1])
outer_circ_y = np.sin(np.linspace(0, 2 * np.pi, n_samples_out)[:-1])
inner_circ_x = (np.cos(np.linspace(0, 2 * np.pi, n_samples_in)[:-1])
* factor)
inner_circ_y = (np.sin(np.linspace(0, 2 * np.pi, n_samples_in)[:-1])
* factor)
X = np.vstack((np.append(outer_circ_x, inner_circ_x),\
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out - 1), np.ones(n_samples_in - 1)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
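# Sketch showing that the two classes separate by radius when factor is small;
# demonstration-only helper with arbitrary values.
def _demo_make_circles():
    X, y = make_circles(n_samples=100, factor=0.3, noise=None, random_state=0)
    radii = np.sqrt((X ** 2).sum(axis=1))
    # outer circle (label 0) has radius 1, inner circle (label 1) radius 0.3
    assert radii[y == 0].mean() > radii[y == 1].mean()
    return X, y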
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
"""
    n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),\
np.append(outer_circ_y, inner_circ_y))).T
    # the first n_samples_out points trace the outer half circle (label 0)
    y = np.hstack([np.zeros(n_samples_out), np.ones(n_samples_in)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y.astype(np.int)
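# Basic usage sketch for the two-moons data; demonstration-only helper.
def _demo_make_moons():
    X, y = make_moons(n_samples=100, noise=0.05, random_state=0)
    assert X.shape == (100, 2)
    assert set(np.unique(y)) == {0, 1}
    return X, y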
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print X.shape
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, (int, np.integer)):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in xrange(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman #1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
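# Sketch verifying that, with zero noise, the output follows the Friedman #1
# formula and only the first five features matter; demonstration-only helper.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=30, n_features=8, noise=0.0,
                          random_state=0)
    y_formula = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                 + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, y_formula)
    return X, y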
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman #3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
    # Random (orthonormal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = (1 - tail_strength) * \
np.exp(-1.0 * (singular_ind / effective_rank) ** 2)
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
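# Sketch showing that most of the spectral energy sits in roughly the first
# `effective_rank` singular values; demonstration-only helper.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=50, n_features=30, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)
    # fraction of energy carried by the five leading singular values
    return (s[:5] ** 2).sum() / (s ** 2).sum()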
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in xrange(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
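# Sketch verifying the Y = D X factorization and the per-column sparsity of
# the code; demonstration-only helper with arbitrary sizes.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))
    assert np.all((X != 0).sum(axis=0) == 3)
    return Y, D, X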
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
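# Sketch checking the symmetric positive-definite property through the
# eigenvalues; demonstration-only helper.
def _demo_make_spd_matrix():
    X = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(X, X.T)
    assert np.all(np.linalg.eigvalsh(X) > 0)
    return X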
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric positive definite matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
| agpl-3.0 |
jfparis/ratesetterClient | setup.py | 1 | 1723 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 Jean-Francois Paris
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import ratesetterclient
setup(
name='ratesetterClient',
version=ratesetterclient.__version__,
description='python library for ratesetter.com',
author='Jean-Francois Paris',
author_email='[email protected]',
package_dir={'':'.'},
url="https://github.com/jfparis/ratesetterClient",
packages=['ratesetterclient'],
    license='LGPL 3.0',
install_requires=[
'lxml',
'requests',
'pandas',
],
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
),
)
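# Typical local usage (illustrative, not specific to this project): build a
# source distribution with "python setup.py sdist" or install in editable
# mode with "pip install -e ." from the directory containing this file.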
| lgpl-3.0 |
jgdo/arips_ros | door_handle_detection/pytorch/dataset.py | 1 | 7855 | import glob
import os
import random
import cv2
import numpy as np
import matplotlib.pyplot as plt
from annot_utils import *
import torch
from torch.utils.data import Dataset
def normalizeCoords(x, size):
n = (x / size) * 2.0 - 1.0
return n
def denormalizeCoords(n, size):
x = int((n + 1) / 2 * size)
return x
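# Round-trip sketch for the coordinate helpers above (illustrative values):
# a pixel coordinate mapped into [-1, 1] and back lands on the same pixel,
# up to the int() truncation in denormalizeCoords.
def _demo_coord_roundtrip(width=320):
    n = normalizeCoords(100, width)   # -0.375 for a 320-pixel-wide image
    x = denormalizeCoords(n, width)   # back to 100
    return n, x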
# https://stackoverflow.com/questions/4601373/better-way-to-shuffle-two-numpy-arrays-in-unison
def unison_shuffled_copies(a, b):
assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
def loadDataFromFolder(folder):
jpgs = glob.glob(folder + "*.jpg")
imgs = []
labels = []
for img_path in jpgs:
annot = getAnnotationName(img_path)
if not os.path.exists(annot):
# print("Skipping unannotated {}".format(img_path))
continue
data = loadAnnotation(annot)
img = cv2.imread(img_path).astype(np.float32) / 255.0
label = np.zeros((240, 320, 2), dtype=np.float32)
if data[2] > 0.5:
label[data[0][1], data[0][0], 0] = 1
label[data[1][1], data[1][0], 1] = 1
kernel = np.ones((5, 5), np.uint8)
label[:, :, 0] = cv2.dilate(label[:, :, 0], kernel, iterations=1)
label[:, :, 1] = cv2.dilate(label[:, :, 1], kernel, iterations=1)
ksize = 21
label[:, :, 0] = cv2.GaussianBlur(label[:, :, 0], (ksize, ksize), 0)
label[:, :, 0] /= label[:, :, 0].max()
label[:, :, 1] = cv2.GaussianBlur(label[:, :, 1], (ksize, ksize), 0)
label[:, :, 1] /= label[:, :, 1].max()
# kernel = np.ones((7, 7), np.uint8)
# label[:,:,0] = cv2.dilate(label[:,:,0], kernel, iterations=1)
# label[:, :, 1] = cv2.dilate(label[:, :, 1], kernel, iterations=1)
imgs.append(img)
labels.append(np.asarray(label))
imgs = np.stack(imgs)
labels = np.stack(labels)
return imgs, labels
def loadAllData():
all_images = []
all_labels = []
all_folders = [
"/home/jgdo/catkin_ws/src/arips_ros/door_handle_detection/data/with_handle/",
"/home/jgdo/catkin_ws/src/arips_ros/door_handle_detection/data/no_handle/",
]
for folder in all_folders:
images, labels = loadDataFromFolder(folder)
all_images.append(images)
all_labels.append(labels)
all_images = np.concatenate(all_images)
all_labels = np.concatenate(all_labels)
all_images, all_labels = unison_shuffled_copies(all_images, all_labels)
return all_images, all_labels
class DoorDataGenerator(Dataset):
def __init__(self, all_images, all_labels, train, shuffle=False):
assert len(all_images) == len(all_labels)
train_ratio = 0.8
split_index = int(len(all_labels) * train_ratio)
if train:
self.images = all_images[0:split_index]
self.labels = all_labels[0:split_index]
else:
self.images = all_images[split_index:-1]
self.labels = all_labels[split_index:-1]
self.shuffle = shuffle
self.get_with_indices = False
self.on_epoch_end()
print("Loaded {} {} images".format(len(self.images), "train" if train else "test"))
def __len__(self):
return len(self.images)
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.images))
if self.shuffle == True:
np.random.shuffle(self.indexes)
    def augment_brightness(self, img, label):
brightness_center = random.uniform(0.0, 1.0)
brightness_factor = random.uniform(0.5, 1.3)
img -= brightness_center
img *= brightness_factor
img += brightness_center
np.clip(img, 0.0, 1.0, out=img)
return img, label
def augment_pos(self, img, label):
width, height = img.shape[1], img.shape[0]
center_x = random.randrange(width)
center_y = random.randrange(height)
scale_factor = random.uniform(1.0, 1.3)
# ignore if label will be outside image
if label[4] > 0:
ncenter_x = normalizeCoords(center_x, width)
ncenter_y = normalizeCoords(center_y, height)
sx = (label[0] - ncenter_x) * scale_factor + ncenter_x
sy = (label[1] - ncenter_y) * scale_factor + ncenter_y
ex = (label[2] - ncenter_x) * scale_factor + ncenter_x
ey = (label[3] - ncenter_y) * scale_factor + ncenter_y
def in_range(x):
return -1.0 <= x <= 1.0
# return unchanged if out of range
for x in [sx, sy, ex, ey]:
if not in_range(x):
return img, label
label = [sx, sy, ex, ey, label[4]]
# else: ignore label, since no door handle present
img_resized = cv2.resize(img, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
start_x = round((center_x) * scale_factor - center_x)
start_y = round((center_y) * scale_factor - center_y)
img = img_resized[start_y:start_y + height, start_x:start_x + width]
return img, label
def augment_noise(self, img, label):
mean = 0
sigma = random.uniform(0.0, 0.05)
gauss = np.random.normal(mean, sigma, img.shape)
img += gauss
np.clip(img, 0, 1, out=img)
return img, label
def augment(self, batch_images, batch_labels, funcs):
assert len(batch_images) == len(batch_labels)
for i in range(len(batch_images)):
img, label = batch_images[i, :, :, 0:3], batch_labels[i]
for func in funcs:
img, label = func(img, label)
batch_images[i, :, :, 0:3], batch_labels[i] = img, label
return batch_images, batch_labels
def toCHW(self, x):
return x.transpose(2, 0, 1)
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indices = self.indexes[index: index + 1]
batch_images, batch_labels = self.images[indices], self.labels[indices]
batch_images, batch_labels = self.augment(batch_images, batch_labels,
[
                                                       self.augment_brightness,
# self.augment_pos,
self.augment_noise,
])
if self.get_with_indices:
return batch_images, batch_labels, indices
return self.toCHW(batch_images[0]), self.toCHW(batch_labels[0])
def showImageLabels(img_float_5, labels):
if type(img_float_5) == torch.Tensor:
img_float_5 = img_float_5.cpu().numpy()
if type(labels) == torch.Tensor:
labels = labels.detach().cpu().numpy()
if img_float_5.shape[0] == 3:
img_float_5 = img_float_5.transpose(1, 2, 0)
if labels.shape[0] == 2:
labels = labels.transpose(1, 2, 0)
img = (img_float_5[:, :, 0:3] * 255).astype(np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
color1 = np.stack([labels[:, :, 0] * 255, labels[:, :, 0] * 0, labels[:, :, 0] * 0], axis=2).astype(np.uint8)
color2 = np.stack([labels[:, :, 1] * 0, labels[:, :, 1] * 255, labels[:, :, 1] * 0], axis=2).astype(np.uint8)
img = np.maximum(img, color1)
img = np.maximum(img, color2)
plt.imshow(img)
plt.show()
def test_dataset():
all_images, all_labels = loadAllData()
dataset = DoorDataGenerator(all_images, all_labels, train=True)
for i in range(10):
batch_images, batch_labels = dataset[i]
showImageLabels(batch_images, batch_labels)
if __name__ == "__main__":
test_dataset()
| gpl-2.0 |
btabibian/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 21 | 17922 | import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = np.dot(W, H)
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
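# Small numeric sketch (an illustrative helper, not a test case): for beta = 2
# the divergence above reduces to half the squared Frobenius norm of X - WH.
def _demo_beta_divergence_frobenius():
    rng = np.random.mtrand.RandomState(0)
    X = np.abs(rng.randn(4, 3))
    W = np.abs(rng.randn(4, 2))
    H = np.abs(rng.randn(2, 3))
    ref = 0.5 * np.linalg.norm(X - np.dot(W, H)) ** 2
    assert_almost_equal(_beta_divergence_dense(X, W, H, 2), ref)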
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has not NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
| bsd-3-clause |
eHealthAfrica/formhub | odk_viewer/pandas_mongo_bridge.py | 2 | 27490 | from itertools import chain
import time
from django.conf import settings
from pandas.core.frame import DataFrame
from pandas.io.parsers import ExcelWriter
from pyxform.survey import Survey
from pyxform.survey_element import SurveyElement
from pyxform.section import Section, RepeatingSection
from pyxform.question import Question
from odk_viewer.models.data_dictionary import ParsedInstance, DataDictionary
from utils.export_tools import question_types_to_exclude
from collections import OrderedDict
from common_tags import ID, XFORM_ID_STRING, STATUS, ATTACHMENTS, GEOLOCATION,\
UUID, SUBMISSION_TIME, NA_REP, BAMBOO_DATASET_ID, DELETEDAT
# this is Mongo Collection where we will store the parsed submissions
xform_instances = settings.MONGO_DB.instances
# the bind type of select multiples that we use to compare
MULTIPLE_SELECT_BIND_TYPE = u"select"
GEOPOINT_BIND_TYPE = u"geopoint"
# column group delimiters
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
DEFAULT_GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
def get_valid_sheet_name(sheet_name, existing_name_list):
# truncate sheet_name to XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS
new_sheet_name = unique_sheet_name = \
sheet_name[:XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS]
# make sure its unique within the list
i = 1
generated_name = new_sheet_name
while generated_name in existing_name_list:
digit_length = len(str(i))
allowed_name_len = XLSDataFrameBuilder.SHEET_NAME_MAX_CHARS - \
digit_length
# make name the required len
if len(generated_name) > allowed_name_len:
generated_name = generated_name[:allowed_name_len]
generated_name = "{0}{1}".format(generated_name, i)
i += 1
return generated_name
def remove_dups_from_list_maintain_order(l):
return list(OrderedDict.fromkeys(l))
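# Added illustrative note (hypothetical values, not part of the original module):
#   >>> remove_dups_from_list_maintain_order(['a', 'b', 'a', 'c'])
#   ['a', 'b', 'c']
# OrderedDict.fromkeys keeps only the first occurrence of each element while
# preserving the original ordering.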
def get_prefix_from_xpath(xpath):
xpath = str(xpath)
parts = xpath.rsplit('/', 1)
if len(parts) == 1:
return None
elif len(parts) == 2:
return '%s/' % parts[0]
else:
raise ValueError('%s cannot be prefixed, it returns %s' % (xpath, str(parts)))
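# Added illustrative note (hypothetical xpaths, not part of the original module):
# get_prefix_from_xpath('children/details/name') returns 'children/details/' and
# get_prefix_from_xpath('name') returns None, since the prefix is everything up to
# and including the last '/' of the abbreviated xpath.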
class NoRecordsFoundError(Exception):
pass
class AbstractDataFrameBuilder(object):
IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ID, ATTACHMENTS, GEOLOCATION,
BAMBOO_DATASET_ID, DELETEDAT]
# fields NOT within the form def that we want to include
ADDITIONAL_COLUMNS = [UUID, SUBMISSION_TIME, "webhooks"]
"""
Group functionality used by any DataFrameBuilder i.e. XLS, CSV and KML
"""
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER, split_select_multiples=True):
self.username = username
self.id_string = id_string
self.filter_query = filter_query
self.group_delimiter = group_delimiter
self.split_select_multiples = split_select_multiples
self._setup()
def _setup(self):
self.dd = DataDictionary.objects.get(user__username=self.username,
id_string=self.id_string)
self.select_multiples = self._collect_select_multiples(self.dd)
self.gps_fields = self._collect_gps_fields(self.dd)
@classmethod
def _fields_to_select(cls, dd):
return [c.get_abbreviated_xpath() for c in \
dd.get_survey_elements() if isinstance(c, Question)]
@classmethod
def _collect_select_multiples(cls, dd):
return dict([(e.get_abbreviated_xpath(), [c.get_abbreviated_xpath()\
for c in e.children])
for e in dd.get_survey_elements() if e.bind.get("type")=="select"])
@classmethod
def _split_select_multiples(cls, record, select_multiples):
""" Prefix contains the xpath and slash if we are within a repeat so that we can figure out which select multiples belong to which repeat
"""
for key, choices in select_multiples.items():
# the select multiple might be blank or not exist in the record, need to make those False
selections = []
if key in record:
# split selected choices by spaces and join by / to the
# element's xpath
selections = ["%s/%s" % (key, r) for r in\
record[key].split(" ")]
# remove the column since we are adding separate columns
# for each choice
record.pop(key)
# add columns to record for every choice, with default
# False and set to True for items in selections
record.update(dict([(choice, choice in selections)\
for choice in choices]))
        # recurse into repeats
for record_key, record_item in record.items():
if type(record_item) == list:
for list_item in record_item:
if type(list_item) == dict:
cls._split_select_multiples(list_item,
select_multiples)
return record
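    # Added illustrative sketch (hypothetical record and choices, not part of the
    # original code): with
    #   record = {'fruits': 'apple mango'}
    #   select_multiples = {'fruits': ['fruits/apple', 'fruits/mango', 'fruits/orange']}
    # _split_select_multiples(record, select_multiples) pops the 'fruits' key and adds
    #   {'fruits/apple': True, 'fruits/mango': True, 'fruits/orange': False}
    # so each choice becomes its own boolean column.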
@classmethod
def _collect_gps_fields(cls, dd):
return [e.get_abbreviated_xpath() for e in dd.get_survey_elements()
if e.bind.get("type")=="geopoint"]
@classmethod
def _tag_edit_string(cls, record):
"""
Turns a list of tags into a string representation.
"""
if '_tags' in record:
tags = []
for tag in record['_tags']:
if ',' in tag and ' ' in tag:
tags.append('"%s"' % tag)
else:
tags.append(tag)
record.update({'_tags': u', '.join(sorted(tags))})
@classmethod
def _split_gps_fields(cls, record, gps_fields):
updated_gps_fields = {}
for key, value in record.iteritems():
if key in gps_fields and isinstance(value, basestring):
gps_xpaths = DataDictionary.get_additional_geopoint_xpaths(key)
gps_parts = dict([(xpath, None) for xpath in gps_xpaths])
                # hack, check if it's a list and grab the object within that
parts = value.split(' ')
# TODO: check whether or not we can have a gps recording
# from ODKCollect that has less than four components,
# for now we are assuming that this is not the case.
if len(parts) == 4:
gps_parts = dict(zip(gps_xpaths, parts))
updated_gps_fields.update(gps_parts)
# check for repeats within record i.e. in value
elif type(value) == list:
for list_item in value:
if type(list_item) == dict:
cls._split_gps_fields(list_item, gps_fields)
record.update(updated_gps_fields)
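    # Added illustrative sketch (hypothetical field and values, not part of the
    # original code): a geopoint answer stored as the string "-1.28 36.82 1660.0 5.0"
    # under key 'gps' is split into the four additional columns returned by
    # DataDictionary.get_additional_geopoint_xpaths('gps') (latitude, longitude,
    # altitude and precision), with one component assigned to each column.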
def _query_mongo(self, query='{}', start=0,
limit=ParsedInstance.DEFAULT_LIMIT, fields='[]', count=False):
# ParsedInstance.query_mongo takes params as json strings
# so we dumps the fields dictionary
count_args = {
'username': self.username,
'id_string': self.id_string,
'query': query,
'fields': '[]',
'sort': '{}',
'count': True
}
count_object = ParsedInstance.query_mongo(**count_args)
record_count = count_object[0]["count"]
if record_count == 0:
raise NoRecordsFoundError("No records found for your query")
# if count was requested, return the count
if count:
return record_count
else:
query_args = {
'username': self.username,
'id_string': self.id_string,
'query': query,
'fields': fields,
                # TODO: we might want to add this in for the user
                # to specify a sort order
'sort': '{}',
'start': start,
'limit': limit,
'count': False
}
# use ParsedInstance.query_mongo
cursor = ParsedInstance.query_mongo(**query_args)
return cursor
class XLSDataFrameBuilder(AbstractDataFrameBuilder):
"""
Generate structures from mongo and DataDictionary for a DataFrameXLSWriter
This builder can choose to query the data in batches and write to a single
ExcelWriter object using multiple instances of DataFrameXLSWriter
"""
INDEX_COLUMN = u"_index"
PARENT_TABLE_NAME_COLUMN = u"_parent_table_name"
PARENT_INDEX_COLUMN = u"_parent_index"
EXTRA_COLUMNS = [INDEX_COLUMN, PARENT_TABLE_NAME_COLUMN,
PARENT_INDEX_COLUMN]
SHEET_NAME_MAX_CHARS = 30
XLS_SHEET_COUNT_LIMIT = 255
XLS_COLUMN_COUNT_MAX = 255
CURRENT_INDEX_META = 'current_index'
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER,
split_select_multiples=True):
super(XLSDataFrameBuilder, self).__init__(username, id_string,
filter_query, group_delimiter, split_select_multiples)
def _setup(self):
super(XLSDataFrameBuilder, self)._setup()
# need to split columns, with repeats in individual sheets and
# everything else on the default sheet
self._generate_sections()
def export_to(self, file_path, batchsize=1000):
self.xls_writer = ExcelWriter(file_path)
# get record count
record_count = self._query_mongo(count=True)
# query in batches and for each batch create an XLSDataFrameWriter and
# write to existing xls_writer object
start = 0
header = True
while start < record_count:
cursor = self._query_mongo(self.filter_query, start=start,
limit=batchsize)
data = self._format_for_dataframe(cursor)
# write all cursor's data to their respective sheets
for section_name, section in self.sections.iteritems():
records = data[section_name]
# TODO: currently ignoring nested repeats so ignore sections that have 0 records
if len(records) > 0:
# use a different group delimiter if needed
columns = section["columns"]
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
columns = [self.group_delimiter.join(col.split("/")) for col in columns ]
columns = columns + self.EXTRA_COLUMNS
writer = XLSDataFrameWriter(records, columns)
writer.write_to_excel(self.xls_writer, section_name,
header=header, index=False)
header = False
# increment counter(s)
start += batchsize
time.sleep(0.1)
self.xls_writer.save()
def _format_for_dataframe(self, cursor):
"""
Format each record for consumption by a dataframe
returns a dictionary with the key being the name of the sheet, and values
a list of dicts to feed into a DataFrame
"""
data = dict((section_name, []) for section_name in self.sections.keys())
main_section = self.sections[self.survey_name]
main_sections_columns = main_section["columns"]
for record in cursor:
# from record, we'll end up with multiple records, one for each
# section we have
# add records for the default section
self._add_data_for_section(data[self.survey_name],
record, main_sections_columns, self.survey_name)
parent_index = main_section[self.CURRENT_INDEX_META]
for sheet_name, section in self.sections.iteritems():
# skip default section i.e survey name
if sheet_name != self.survey_name:
xpath = section["xpath"]
columns = section["columns"]
# TODO: handle nested repeats -ignoring nested repeats for
# now which will not be in the top level record, perhaps
                    # nest sections as well so we can recurse in and get them
if record.has_key(xpath):
repeat_records = record[xpath]
num_repeat_records = len(repeat_records)
for repeat_record in repeat_records:
self._add_data_for_section(data[sheet_name],
repeat_record, columns, sheet_name,
parent_index, self.survey_name)
return data
def _add_data_for_section(self, data_section, record, columns, section_name,
parent_index = -1, parent_table_name = None):
data_section.append({})
self.sections[section_name][self.CURRENT_INDEX_META] += 1
index = self.sections[section_name][self.CURRENT_INDEX_META]
#data_section[len(data_section)-1].update(record) # we could simply do
# this but end up with duplicate data from repeats
if self.split_select_multiples:
# find any select multiple(s) and add additional columns to record
record = self._split_select_multiples(record, self.select_multiples)
# alt, precision
self._split_gps_fields(record, self.gps_fields)
for column in columns:
data_value = None
try:
data_value = record[column]
except KeyError:
# a record may not have responses for some elements simply
# because they were not captured
pass
data_section[
len(data_section)-1].update({self.group_delimiter.join(column.split('/')) if self.group_delimiter != DEFAULT_GROUP_DELIMITER else column: data_value})
data_section[len(data_section)-1].update({
XLSDataFrameBuilder.INDEX_COLUMN: index,
XLSDataFrameBuilder.PARENT_INDEX_COLUMN: parent_index,
XLSDataFrameBuilder.PARENT_TABLE_NAME_COLUMN: parent_table_name})
# add ADDITIONAL_COLUMNS
data_section[len(data_section)-1].update(
dict([(column, record[column] if record.has_key(column) else None)
for column in self.ADDITIONAL_COLUMNS]))
def _generate_sections(self):
"""
Split survey questions into separate sections for each xls sheet and
columns for each section
"""
# clear list
self.sections = OrderedDict()
# dict of select multiple elements
self.select_multiples = {}
survey_element = self.dd.survey
self.survey_name = get_valid_sheet_name(
survey_element.name, self.sections.keys())
self._create_section(
self.survey_name, survey_element.get_abbreviated_xpath(), False)
# build sections
self._build_sections_recursive(self.survey_name, self.dd.get_survey())
for section_name in self.sections:
self.sections[section_name]['columns'] += self.ADDITIONAL_COLUMNS
self.get_exceeds_xls_limits()
def _build_sections_recursive(self, section_name, element, is_repeating=False):
"""Builds a section's children and recurses any repeating sections
to build those as a separate section
"""
for child in element.children:
# if a section, recurse
if isinstance(child, Section):
new_is_repeating = isinstance(child, RepeatingSection)
new_section_name = section_name
# if its repeating, build a new section
if new_is_repeating:
new_section_name = get_valid_sheet_name(
child.name, self.sections.keys())
self._create_section(new_section_name,
child.get_abbreviated_xpath(), True)
self._build_sections_recursive(
new_section_name, child, new_is_repeating)
else:
# add to survey_sections
if isinstance(child, Question) and not \
question_types_to_exclude(child.type)\
and not child.bind.get(u"type") == MULTIPLE_SELECT_BIND_TYPE:
self._add_column_to_section(section_name, child)
elif child.bind.get(u"type") == MULTIPLE_SELECT_BIND_TYPE:
self.select_multiples[child.get_abbreviated_xpath()] = \
[option.get_abbreviated_xpath() for option in
child.children]
# if select multiple, get its choices and make them
# columns
if self.split_select_multiples:
for option in child.children:
self._add_column_to_section(section_name, option)
else:
self._add_column_to_section(section_name, child)
# split gps fields within this section
if child.bind.get(u"type") == GEOPOINT_BIND_TYPE:
# add columns for geopoint components
for xpath in\
self.dd.get_additional_geopoint_xpaths(
child.get_abbreviated_xpath()):
self._add_column_to_section(section_name, xpath)
def get_exceeds_xls_limits(self):
if not hasattr(self, "exceeds_xls_limits"):
self.exceeds_xls_limits = False
if len(self.sections) > self.XLS_SHEET_COUNT_LIMIT:
self.exceeds_xls_limits = True
else:
for section in self.sections.itervalues():
if len(section["columns"]) > self.XLS_COLUMN_COUNT_MAX:
self.exceeds_xls_limits = True
break
return self.exceeds_xls_limits
def _create_section(self, section_name, xpath, is_repeat):
index = len(self.sections)
self.sections[section_name] = {"name": section_name, "xpath": xpath,
"columns": [], "is_repeat": is_repeat,
self.CURRENT_INDEX_META: 0}
def _add_column_to_section(self, sheet_name, column):
section = self.sections[sheet_name]
xpath = None
if isinstance(column, SurveyElement):
xpath = column.get_abbreviated_xpath()
elif isinstance(column, basestring):
xpath = column
assert(xpath)
# make sure column is not already in list
if xpath not in section["columns"]:
section["columns"].append(xpath)
class CSVDataFrameBuilder(AbstractDataFrameBuilder):
def __init__(self, username, id_string, filter_query=None,
group_delimiter=DEFAULT_GROUP_DELIMITER,
split_select_multiples=True):
super(CSVDataFrameBuilder, self).__init__(username,
id_string, filter_query, group_delimiter, split_select_multiples)
self.ordered_columns = OrderedDict()
def _setup(self):
super(CSVDataFrameBuilder, self)._setup()
@classmethod
def _reindex(cls, key, value, ordered_columns, parent_prefix = None):
"""
Flatten list columns by appending an index, otherwise return as is
"""
d = {}
# check for lists
if type(value) is list and len(value) > 0 and key != "webhooks" :
for index, item in enumerate(value):
# start at 1
index += 1
# for each list check for dict, we want to transform the key of
# this dict
if type(item) is dict:
for nested_key, nested_val in item.iteritems():
# given the key "children/details" and nested_key/ abbreviated xpath "children/details/immunization/polio_1", generate ["children", index, "immunization/polio_1"]
xpaths = [
"%s[%s]" % (
nested_key[:nested_key.index(key) + len(key)],
index),
nested_key[nested_key.index(key) + len(key)+1:]]
                        # re-create the xpath then split on /
xpaths = "/".join(xpaths).split("/")
new_prefix = xpaths[:-1]
if type(nested_val) is list and nested_key != "webhooks":
# if nested_value is a list, rinse and repeat
d.update(cls._reindex(nested_key, nested_val,
ordered_columns, new_prefix))
else:
# it can only be a scalar
# collapse xpath
if parent_prefix:
xpaths[0:len(parent_prefix)] = parent_prefix
new_xpath = u"/".join(xpaths)
# check if this key exists in our ordered columns
if key in ordered_columns.keys():
if not new_xpath in ordered_columns[key]:
ordered_columns[key].append(new_xpath)
d[new_xpath] = nested_val
else:
d[key] = value
else:
            # anything that's not a list will be in the top level dict so it's
# safe to simply assign
d[key] = value
return d
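    # Added illustrative sketch (hypothetical repeat data, not part of the original
    # code): given key = 'children' and
    #   value = [{'children/name': 'a'}, {'children/name': 'b'}]
    # _reindex flattens the repeat into indexed columns such as
    #   {'children[1]/name': 'a', 'children[2]/name': 'b'}
    # and, when 'children' is already registered in ordered_columns, appends the
    # generated xpaths there so the CSV header can include them.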
@classmethod
def _build_ordered_columns(cls, survey_element, ordered_columns,
is_repeating_section=False):
"""
Build a flat ordered dict of column groups
is_repeating_section ensures that child questions of repeating sections
are not considered columns
"""
for child in survey_element.children:
child_xpath = child.get_abbreviated_xpath()
if isinstance(child, Section):
child_is_repeating = False
if isinstance(child, RepeatingSection):
ordered_columns[child.get_abbreviated_xpath()] = []
child_is_repeating = True
cls._build_ordered_columns(child, ordered_columns,
child_is_repeating)
elif isinstance(child, Question) and not \
question_types_to_exclude(child.type) and not\
is_repeating_section:# if is_repeating_section, its parent
                # already initialised an empty list so we don't add it to our
# list of columns, the repeating columns list will be
# generated when we reindex
ordered_columns[child.get_abbreviated_xpath()] = None
def _format_for_dataframe(self, cursor):
# TODO: check for and handle empty results
# add ordered columns for select multiples
if self.split_select_multiples:
for key, choices in self.select_multiples.items():
# HACK to ensure choices are NOT duplicated
self.ordered_columns[key] = remove_dups_from_list_maintain_order(
choices)
# add ordered columns for gps fields
for key in self.gps_fields:
gps_xpaths = self.dd.get_additional_geopoint_xpaths(key)
self.ordered_columns[key] = [key] + gps_xpaths
data = []
for record in cursor:
# split select multiples
if self.split_select_multiples:
record = self._split_select_multiples(record,
self.select_multiples)
# check for gps and split into components i.e. latitude, longitude,
# altitude, precision
self._split_gps_fields(record, self.gps_fields)
self._tag_edit_string(record)
flat_dict = {}
# re index repeats
for key, value in record.iteritems():
reindexed = self._reindex(key, value, self.ordered_columns)
flat_dict.update(reindexed)
            # if the delimiter is different, replace within the record as well
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
flat_dict = dict((self.group_delimiter.join(k.split('/')), v) for k, v in flat_dict.iteritems())
data.append(flat_dict)
return data
def export_to(self, file_or_path, data_frame_max_size=30000):
from math import ceil
# get record count
record_count = self._query_mongo(query=self.filter_query, count=True)
self.ordered_columns = OrderedDict()
self._build_ordered_columns(self.dd.survey, self.ordered_columns)
# pandas will only export 30k records in a dataframe to a csv - we need to create multiple 30k dataframes if required,
# we need to go through all the records though so that we can figure out the columns we need for repeats
datas = []
num_data_frames = int(ceil(float(record_count)/float(data_frame_max_size)))
for i in range(num_data_frames):
cursor = self._query_mongo(self.filter_query,
start=(i * data_frame_max_size), limit=data_frame_max_size)
data = self._format_for_dataframe(cursor)
datas.append(data)
columns = list(chain.from_iterable([[ xpath ] if cols == None else cols\
for xpath, cols in self.ordered_columns.iteritems()]))
# use a different group delimiter if needed
if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
columns = [self.group_delimiter.join(col.split("/")) for col in columns ]
# add extra columns
columns += [col for col in self.ADDITIONAL_COLUMNS]
header = True
if hasattr(file_or_path, 'read'):
csv_file = file_or_path
close = False
else:
csv_file = open(file_or_path, "wb")
close = True
for data in datas:
writer = CSVDataFrameWriter(data, columns)
writer.write_to_csv(csv_file, header=header)
header = False
if close:
csv_file.close()
class XLSDataFrameWriter(object):
def __init__(self, records, columns):
self.dataframe = DataFrame(records, columns=columns)
def write_to_excel(self, excel_writer, sheet_name, header=False,
index=False):
self.dataframe.to_excel(excel_writer, sheet_name, header=header,
index=index)
class CSVDataFrameWriter(object):
def __init__(self, records, columns):
# TODO: if records is empty, raise a known exception
# catch it in the view and handle
assert(len(records) > 0)
self.dataframe = DataFrame(records, columns=columns)
# remove columns we don't want
for col in AbstractDataFrameBuilder.IGNORED_COLUMNS:
if col in self.dataframe.columns:
del(self.dataframe[col])
def write_to_csv(self, csv_file, header=True, index=False):
self.dataframe.to_csv(csv_file, header=header, index=index, na_rep=NA_REP,
encoding='utf-8')
| bsd-2-clause |
bsipocz/astropy | astropy/modeling/blackbody.py | 1 | 13057 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model and functions related to blackbody radiation.
.. _blackbody-planck-law:
Blackbody Radiation
-------------------
Blackbody flux is calculated with Planck law
(:ref:`Rybicki & Lightman 1979 <ref-rybicki1979>`):
.. math::
B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}
B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
where the unit of :math:`B_{\\lambda}(T)` is
:math:`erg \\; s^{-1} cm^{-2} \\mathring{A}^{-1} sr^{-1}`, and
:math:`B_{\\nu}(T)` is :math:`erg \\; s^{-1} cm^{-2} Hz^{-1} sr^{-1}`.
:func:`~astropy.modeling.blackbody.blackbody_lambda` and
:func:`~astropy.modeling.blackbody.blackbody_nu` calculate the
blackbody flux for :math:`B_{\\lambda}(T)` and :math:`B_{\\nu}(T)`,
respectively.
For blackbody representation as a model, see :class:`BlackBody1D`.
.. _blackbody-examples:
Examples
^^^^^^^^
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda, blackbody_nu
Calculate blackbody flux for 5000 K at 100 and 10000 Angstrom while suppressing
any Numpy warnings:
>>> wavelengths = [100, 10000] * u.AA
>>> temperature = 5000 * u.K
>>> with np.errstate(all='ignore'):
... flux_lam = blackbody_lambda(wavelengths, temperature)
... flux_nu = blackbody_nu(wavelengths, temperature)
>>> flux_lam # doctest: +FLOAT_CMP
<Quantity [ 1.27452545e-108, 7.10190526e+005] erg / (Angstrom cm2 s sr)>
>>> flux_nu # doctest: +FLOAT_CMP
<Quantity [ 4.25135927e-123, 2.36894060e-005] erg / (cm2 Hz s sr)>
Alternatively, the same results for ``flux_nu`` can be computed using
:class:`BlackBody1D` with blackbody representation as a model. The difference between
this and the former approach is in one additional step outlined as follows:
>>> from astropy import constants as const
>>> from astropy.modeling import models
>>> temperature = 5000 * u.K
>>> bolometric_flux = const.sigma_sb * temperature ** 4 / np.pi
>>> bolometric_flux.to(u.erg / (u.cm * u.cm * u.s)) # doctest: +FLOAT_CMP
<Quantity 1.12808367e+10 erg / (cm2 s)>
>>> wavelengths = [100, 10000] * u.AA
>>> bb_astro = models.BlackBody1D(temperature, bolometric_flux=bolometric_flux)
>>> bb_astro(wavelengths).to(u.erg / (u.cm * u.cm * u.Hz * u.s)) / u.sr # doctest: +FLOAT_CMP
<Quantity [4.25102471e-123, 2.36893879e-005] erg / (cm2 Hz s sr)>
where ``bb_astro(wavelengths)`` computes the equivalent result as ``flux_nu`` above.
Plot a blackbody spectrum for 5000 K:
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
temperature = 5000 * u.K
wavemax = (const.b_wien / temperature).to(u.AA) # Wien's displacement law
waveset = np.logspace(
0, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
flux = blackbody_lambda(waveset, temperature)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(waveset.value, flux.value)
ax.axvline(wavemax.value, ls='--')
ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
ax.set_xlabel(r'$\\lambda$ ({0})'.format(waveset.unit))
ax.set_ylabel(r'$B_{\\lambda}(T)$')
ax.set_title('Blackbody, T = {0}'.format(temperature))
Note that an array of temperatures can also be given instead of a single
temperature. In this case, the Numpy broadcasting rules apply: for instance, if
the frequency and temperature have the same shape, the output will have this
shape too, while if the frequency is a 2-d array with shape ``(n, m)`` and the
temperature is an array with shape ``(m,)``, the output will have a shape
``(n, m)``.
See Also
^^^^^^^^
.. _ref-rybicki1979:
Rybicki, G. B., & Lightman, A. P. 1979, Radiative Processes in Astrophysics (New York, NY: Wiley)
"""
import warnings
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter
from astropy import constants as const
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['BlackBody1D', 'blackbody_nu', 'blackbody_lambda']
# Units
FNU = u.erg / (u.cm**2 * u.s * u.Hz)
FLAM = u.erg / (u.cm**2 * u.s * u.AA)
# Some platform implementations of expm1() are buggy and Numpy uses
# them anyways--the bug is that on certain large inputs it returns
# NaN instead of INF like it should (it should only return NaN on a
# NaN input)
# See https://github.com/astropy/astropy/issues/4171
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
_has_buggy_expm1 = np.isnan(np.expm1(1000)) or np.isnan(np.expm1(1e10))
class BlackBody1D(Fittable1DModel):
"""
One dimensional blackbody model.
Parameters
----------
temperature : :class:`~astropy.units.Quantity`
Blackbody temperature.
bolometric_flux : :class:`~astropy.units.Quantity`
The bolometric flux of the blackbody (i.e., the integral over the
spectral axis).
Notes
-----
Model formula:
.. math:: f(x) = \\pi B_{\\nu} f_{\\text{bolometric}} / (\\sigma T^{4})
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody1D()
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.3585381201978953e-15 erg / (cm2 Hz s)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody1D
from astropy.modeling.blackbody import FLAM
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody1D(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav).to(FLAM, u.spectral_density(wav))
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.lambda_max.to(u.AA).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a bolometric flux. The
# bolometric flux is the integral of the model over the spectral axis. This
# is more useful than simply having an amplitude parameter.
temperature = Parameter(default=5000, min=0, unit=u.K)
bolometric_flux = Parameter(default=1, min=0, unit=u.erg / u.cm ** 2 / u.s)
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz.
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {'x': u.spectral()}
def evaluate(self, x, temperature, bolometric_flux):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz.
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
bolometric_flux : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Desired integral for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``bolometric_flux``.
"""
# We need to make sure that we attach units to the temperature if it
# doesn't have any units. We do this because even though blackbody_nu
# can take temperature values without units, the / temperature ** 4
# factor needs units to be defined.
if isinstance(temperature, u.Quantity):
temperature = temperature.to(u.K, equivalencies=u.temperature())
else:
temperature = u.Quantity(temperature, u.K)
# We normalize the returned blackbody so that the integral would be
# unity, and we then multiply by the bolometric flux. A normalized
# blackbody has f_nu = pi * B_nu / (sigma * T^4), which is what we
# calculate here. We convert to 1/Hz to make sure the units are
# simplified as much as possible, then we multiply by the bolometric
# flux to get the normalization right.
fnu = ((np.pi * u.sr * blackbody_nu(x, temperature) /
const.sigma_sb / temperature ** 4).to(1 / u.Hz) *
bolometric_flux)
# If the bolometric_flux parameter has no unit, we should drop the /Hz
# and return a unitless value. This occurs for instance during fitting,
# since we drop the units temporarily.
if hasattr(bolometric_flux, 'unit'):
return fnu
else:
return fnu.value
@property
def input_units(self):
# The input units are those of the 'x' value, which should always be
# Hz. Because we do this, and because input_units_allow_dimensionless
# is set to True, dimensionless values are assumed to be in Hz.
return {'x': u.Hz}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('temperature', u.K),
('bolometric_flux', outputs_unit['y'] * u.Hz)])
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
def blackbody_nu(in_x, temperature):
"""Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Hz.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(temperature, u.K, dtype=np.float64)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f'Temperature should be positive: {temp}')
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn('Input contains invalid wavelength/frequency value(s)',
AstropyUserWarning)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
if _has_buggy_expm1:
# Replace incorrect nan results with infs--any result of 'nan' is
# incorrect unless the input (in log_boltz) happened to be nan to begin
# with. (As noted in #4393 ideally this would be replaced by a version
# of expm1 that doesn't have this bug, rather than fixing incorrect
# results after the fact...)
boltzm1_nans = np.isnan(boltzm1)
if np.any(boltzm1_nans):
if boltzm1.isscalar and not np.isnan(log_boltz):
boltzm1 = np.inf
else:
boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf
# Calculate blackbody flux
bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
flux = bb_nu.to(FNU, u.spectral_density(freq))
return flux / u.sr # Add per steradian to output flux unit
def blackbody_lambda(in_x, temperature):
"""Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Angstrom.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
"""
if getattr(in_x, 'unit', None) is None:
in_x = u.Quantity(in_x, u.AA)
bb_nu = blackbody_nu(in_x, temperature) * u.sr # Remove sr for conversion
flux = bb_nu.to(FLAM, u.spectral_density(in_x))
return flux / u.sr # Add per steradian to output flux unit
| bsd-3-clause |
wdurhamh/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This selects only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
#evaluate the relationship between the various parameters with the Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
jz3707/tushare | test/storing_test.py | 40 | 1729 | # -*- coding:utf-8 -*-
import os
from sqlalchemy import create_engine
from pandas.io.pytables import HDFStore
import tushare as ts
def csv():
df = ts.get_hist_data('000875')
df.to_csv('c:/day/000875.csv',columns=['open','high','low','close'])
def xls():
df = ts.get_hist_data('000875')
    # save directly to an Excel file
df.to_excel('c:/day/000875.xlsx', startrow=2,startcol=5)
def hdf():
df = ts.get_hist_data('000875')
# df.to_hdf('c:/day/store.h5','table')
store = HDFStore('c:/day/store.h5')
store['000875'] = df
store.close()
def json():
df = ts.get_hist_data('000875')
df.to_json('c:/day/000875.json',orient='records')
    # or use it directly
print(df.to_json(orient='records'))
def appends():
filename = 'c:/day/bigfile.csv'
for code in ['000875', '600848', '000981']:
df = ts.get_hist_data(code)
if os.path.exists(filename):
df.to_csv(filename, mode='a', header=None)
else:
df.to_csv(filename)
def db():
df = ts.get_tick_data('600848',date='2014-12-22')
engine = create_engine('mysql://root:[email protected]/mystock?charset=utf8')
# db = MySQLdb.connect(host='127.0.0.1',user='root',passwd='jimmy1',db="mystock",charset="utf8")
# df.to_sql('TICK_DATA',con=db,flavor='mysql')
# db.close()
df.to_sql('tick_data',engine,if_exists='append')
def nosql():
import pymongo
import json
conn = pymongo.Connection('127.0.0.1', port=27017)
df = ts.get_tick_data('600848',date='2014-12-22')
print(df.to_json(orient='records'))
conn.db.tickdata.insert(json.loads(df.to_json(orient='records')))
# print conn.db.tickdata.find()
if __name__ == '__main__':
nosql() | bsd-3-clause |
ales-erjavec/orange | Orange/orng/orngProjectionPursuit.py | 6 | 7987 | import orange
import numpy
import scipy.special
import scipy.optimize
import scipy.stats
from pylab import *
def sqrtm(mat):
""" Retruns the square root of the matrix mat """
U, S, V = numpy.linalg.svd(mat)
D = numpy.diag(numpy.sqrt(S))
return numpy.dot(numpy.dot(U,D),V)
def standardize(mat):
""" Subtracts means and multiplies by diagonal elements of inverse
        square root of the correlation matrix (as computed by numpy.corrcoef).
"""
av = numpy.average(mat, axis=0)
sigma = numpy.corrcoef(mat, rowvar=0)
srSigma = sqrtm(sigma)
isrSigma = numpy.linalg.inv(srSigma)
return (mat-av) * numpy.diag(isrSigma)
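# Added illustrative note (not part of the original module): standardize() centres each
# column of mat and rescales it using the diagonal of the inverse square root of the
# matrix returned by numpy.corrcoef, so a hypothetical call such as
#   standardize(numpy.random.rand(100, 4))
# yields an array of the same shape whose columns are comparably scaled, which is what
# the projection pursuit indices below assume.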
def friedman_tmp_func(alpha, Z=numpy.zeros((1,1)), J=5, n=1):
alpha = numpy.array(alpha)
pols = [scipy.special.legendre(j) for j in range(0,J+1)]
vals0 = [numpy.dot(alpha.T, Z[i,:]) for i in range(n)]
def f_tmp(x): return 2*x-1
vals = map(f_tmp, map(scipy.stats.zprob, vals0))
val = [1./n*sum(map(p, vals))**2 for p in pols]
return vals, pols, - 0.5 * sum([(2*j+1)*v for j, v in enumerate(val)])
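# Added reading aid (not part of the original module): friedman_tmp_func projects each
# row of Z onto alpha, maps the projection through the normal CDF (scipy.stats.zprob)
# and then onto [-1, 1] via 2*x - 1, and returns the negative weighted sum of squared
# Legendre-polynomial moments up to degree J, so minimising the returned value
# maximises Friedman's projection index for that direction.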
class ProjectionPursuit:
FRIEDMAN = 0
MOMENT = 1
SILHUETTE = 2
HARTINGAN = 3
def __init__(self, data, index = FRIEDMAN, dim=2, maxiter=10):
self.dim = dim
if type(data) == orange.ExampleTable:
self.dataNP = data.toNumpy()[0] # TODO: check if conversion of discrete values works ok
else:
self.dataNP = data
self.Z = standardize(self.dataNP)
self.totalSize, self.nVars = numpy.shape(self.Z)
self.maxiter = maxiter
self.currentOptimum = None
self.index = index
def optimize(self, maxiter = 5, opt_method=scipy.optimize.fmin):
func = self.getIndex()
if self.currentOptimum != None:
x = self.currentOptimum
else:
x = numpy.random.rand(self.dim * self.nVars)
alpha = opt_method(func, x, maxiter=maxiter).reshape(self.dim * self.nVars,1)
self.currentOptimum = alpha
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def find_optimum(self, opt_method=scipy.optimize.fmin):
func = self.getIndex()
alpha = opt_method(func, \
numpy.random.rand(self.dim * self.nVars),\
maxiter=self.maxiter).reshape(self.dim * self.nVars,1)
print alpha, len(alpha)
optValue = func(alpha)
if self.dim == 2:
alpha1 = alpha[:self.nVars]
alpha2 = alpha[self.nVars:]
alpha = numpy.append(alpha1, alpha2, axis=1)
projectedData = numpy.dot(self.Z, alpha)
return alpha, optValue, projectedData
def getIndex(self):
if self.index == self.FRIEDMAN:
return self.getFriedmanIndex()
elif self.index == self.MOMENT:
return self.getMomentIndex()
elif self.index == self.SILHUETTE:
return self.getSilhouetteBasedIndex()
elif self.index == self.HARTINGAN:
return self.getHartinganBasedIndex()
def getFriedmanIndex(self, J=5):
if self.dim == 1:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
vals, pols, val = friedman_tmp_func(alpha, Z=Z, J=J, n=n)
return val
elif self.dim == 2:
def func(alpha, Z=self.Z, J=J, n=self.totalSize):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
vals1, pols, val1 = friedman_tmp_func(alpha1, Z=Z, J=J, n=n)
vals2, pols, val2 = friedman_tmp_func(alpha2, Z=Z, J=J, n=n)
val12 = - 0.5 * sum([sum([(2*j+1)*(2*k+1)*vals1[j]*vals2[k] for k in range(0, J+1-j)]) \
for j in range(0,J+1)])
## print val1, val2
return 0.5 * (val1 + val2 + val12)
return func
    def getMomentIndex(self):  # a factor of 1./12 could be added here
if self.dim == 1:
def func(alpha):
smpl = numpy.dot(self.Z, alpha)
return scipy.stats.kstat(smpl, n=3) ** 2 + 0.25 * scipy.stats.kstat(smpl, n=4)
else:
print "To do."
return func
def getSilhouetteBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km = orngClustering.KMeans(smpl, centroids=nClusters)
score = orngClustering.score_silhouette(km)
return -score
import functools
silhIndex = functools.partial(func, nClusters=nClusters)
return silhIndex
def getHartinganBasedIndex(self, nClusters=5):
import orngClustering
def func(alpha, nClusters=nClusters):
alpha1, alpha2 = alpha[:self.nVars], alpha[self.nVars:]
alpha1 = alpha1.reshape((self.nVars,1))
alpha2 = alpha2.reshape(self.nVars,1)
alpha = numpy.append(alpha1, alpha2, axis=1)
smpl = numpy.dot(self.Z, alpha)
smpl = orange.ExampleTable(smpl)
km1 = orngClustering.KMeans(smpl, centroids=nClusters)
km2 = orngClustering.KMeans(smpl, centroids=nClusters)
score = (self.totalSize - nClusters - 1) * (km1.score-km2.score) / (km2.score)
return -score
import functools
hartinganIndex = functools.partial(func, nClusters=nClusters)
return hartinganIndex
def draw_scatter_hist(x,y, fileName="lala.png"):
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
clf()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left+width+0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
figure(1, figsize=(8,8))
axScatter = axes(rect_scatter)
axHistx = axes(rect_histx)
axHisty = axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = numpy.max([numpy.max(np.fabs(x)), numpy.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim( (-lim, lim) )
axScatter.set_ylim( (-lim, lim) )
bins = numpy.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
savefig(fileName)
if __name__=="__main__":
## data = orange.ExampleTable("c:\\Work\\Subgroup discovery\\iris.tab")
data = orange.ExampleTable(r"E:\Development\Orange Datasets\UCI\iris.tab")
data = data.select(data.domain.attributes)
impmin = orange.ImputerConstructor_minimal(data)
data = impmin(data)
ppy = ProjectionPursuit(data, dim=2, maxiter=100)
#ppy.friedman_index(J=5)
#ppy.silhouette_based_index(nClusters=2)
## import os
## os.chdir("C:\\Work\\Subgroup discovery")
#draw_scatter_hist(ppy.friedmanProjData[:,0], ppy.friedmanProjData[:,1])
#draw_scatter_hist(ppy.silhouetteProjData[:,0], ppy.silhouetteProjData[:,1])
print ppy.optimize()
| gpl-3.0 |
platinhom/ManualHom | Coding/Python/scipy-html-0.16.1/generated/scipy-stats-maxwell-1.py | 1 | 1050 | from scipy.stats import maxwell
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
# Calculate a few first moments:
mean, var, skew, kurt = maxwell.stats(moments='mvsk')
# Display the probability density function (``pdf``):
x = np.linspace(maxwell.ppf(0.01),
maxwell.ppf(0.99), 100)
ax.plot(x, maxwell.pdf(x),
'r-', lw=5, alpha=0.6, label='maxwell pdf')
# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.
# Freeze the distribution and display the frozen ``pdf``:
rv = maxwell()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
# Check accuracy of ``cdf`` and ``ppf``:
vals = maxwell.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], maxwell.cdf(vals))
# True
# Generate random numbers:
r = maxwell.rvs(size=1000)
# And compare the histogram:
ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
| gpl-2.0 |
aetilley/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
luoshao23/ML_algorithm | Deep_Learning/WGAN.py | 1 | 9010 |
# Large amount of credit goes to:
# https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py
# which I've used as a reference for this implementation
from __future__ import print_function, division
from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from functools import partial
import keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
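# Added note (not part of the original script): RandomWeightedAverage draws one alpha per
# sample (shape (32, 1, 1, 1), which assumes the batch size of 32 used when train() is
# called below) and returns alpha * real + (1 - alpha) * fake, i.e. a random point on the
# line between each real image and its generated counterpart; the gradient penalty is
# evaluated at these interpolated samples, as in the WGAN-GP formulation.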
class WGANGP():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
# Following parameter and optimizer set as recommended in paper
self.n_critic = 5
optimizer = RMSprop(lr=0.00005)
# Build the generator and critic
self.generator = self.build_generator()
self.critic = self.build_critic()
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.generator.trainable = False
# Image input (real sample)
real_img = Input(shape=self.img_shape)
# Noise input
z_disc = Input(shape=(self.latent_dim,))
# Generate image based of noise (fake sample)
fake_img = self.generator(z_disc)
# Discriminator determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Use Python partial to provide loss function with additional
# 'averaged_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(loss=[self.wasserstein_loss,
self.wasserstein_loss,
partial_gp_loss],
optimizer=optimizer,
loss_weights=[1, 1, 10])
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.critic.trainable = False
self.generator.trainable = True
# Sampled noise for input to generator
z_gen = Input(shape=(100,))
# Generate images based of noise
img = self.generator(z_gen)
# Discriminator determines validity
valid = self.critic(img)
# Defines generator model
self.generator_model = Model(z_gen, valid)
self.generator_model.compile(loss=self.wasserstein_loss, optimizer=optimizer)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
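    # Added note (not part of the original script): with the label convention used in
    # train() (valid = -1, fake = +1), minimising K.mean(y_true * y_pred) pushes the
    # critic towards large scores for real samples and low scores for generated ones,
    # which is the Wasserstein critic objective up to sign.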
def build_generator(self):
model = Sequential()
model.add(Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
model.add(Reshape((7, 7, 128)))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=4, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(Activation("relu"))
model.add(Conv2D(self.channels, kernel_size=4, padding="same"))
model.add(Activation("tanh"))
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_critic(self):
model = Sequential()
model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization(momentum=0.8))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def train(self, epochs, batch_size, sample_interval=50):
# Load the dataset
(X_train, _), (_, _) = mnist.load_data()
# Rescale -1 to 1
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = -np.ones((batch_size, 1))
fake = np.ones((batch_size, 1))
dummy = np.zeros((batch_size, 1)) # Dummy gt for gradient penalty
for epoch in range(epochs):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample generator input
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Train the critic
d_loss = self.critic_model.train_on_batch([imgs, noise],
[valid, fake, dummy])
# ---------------------
# Train Generator
# ---------------------
g_loss = self.generator_model.train_on_batch(noise, valid)
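            # Added note: the critic has just been updated self.n_critic (= 5) times
            # for this single generator update, matching the training schedule set in
            # __init__ as recommended in the WGAN paper.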
# Plot the progress
print ("%d [D loss: %f] [G loss: %f]" % (epoch, d_loss[0], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/mnist_%d.png" % epoch)
plt.close()
if __name__ == '__main__':
wgan = WGANGP()
# wgan.train(epochs=30000, batch_size=32, sample_interval=100)
wgan.train(epochs=300, batch_size=32, sample_interval=100) | mit |
ztwo/Auto_Analysis | public/Performance.py | 1 | 1608 | # -*- coding: utf-8 -*-
__author__ = 'joko'
"""
@author:joko
@time: 16/11/8 下午2:52
"""
import lib.Utils as U
def data_marker(cpu, mem, h_cpu, h_mem, path):
"""
    :param cpu: list of CPU usage percentages for this run
    :param mem: list of memory usage percentages for this run
    :param h_cpu: list of historical CPU usage percentages
    :param h_mem: list of historical memory usage percentages (may be None)
    :param path: file path where the chart image is saved
:return:
"""
import matplotlib
matplotlib.use('Agg')
import pylab as pl
pl.plot(cpu, 'r')
pl.plot(mem, 'g')
pl.title('performance')
pl.xlabel('second')
pl.ylabel('percent')
pl.plot(cpu, color="red", linewidth=2.5, linestyle="-", label="this_cpu")
pl.plot(mem, color="blue", linewidth=2.5, linestyle="-", label="this_mem")
if h_mem is not None:
pl.plot(
h_cpu,
color="magenta",
linewidth=2.5,
linestyle="-",
label="historical_cpu")
pl.plot(
h_mem,
color="green",
linewidth=2.5,
linestyle="-",
label="historical_mem")
pl.legend(loc='upper left')
pl.xlim(0.0, len(mem))
pl.ylim(0.0, 100.0)
pl.savefig(path)
U.Logging.debug('Report:%s' % path)
    # pl.show()  # bring up the GUI for real-time viewing
    pl.close()  # must close the figure, otherwise the values are never released from memory
# import matplotlib as mpl
# print mpl.get_cachedir()
if __name__ == '__main__':
import random
def get_num():
lst = []
for i in range(10):
lst.append(random.randint(1, 60))
return lst
for i in range(1):
data_marker(get_num(), get_num(), get_num(), get_num(), '%s.png' % i)
cpu_list = []
mem_list = []
| mit |
AnasGhrab/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
carlochess/proyectoComplejidad | Model/mainModel.py | 1 | 6163 | #==============================================================================
#==================================Main Model==================================
#==============================================================================
from __future__ import print_function
class MainModel(object):
def __init__(self,numberBoxes=None,volumeBackpack=None,maximumWeightBackpack=None,
descriptionBoxes=None,optimumNumberPeople=None):
super(MainModel,self).__init__()
self.numberBoxes=numberBoxes
self.volumeBackpack=volumeBackpack
self.maximumWeightBackpack=maximumWeightBackpack
self.descriptionBoxes=descriptionBoxes
self.optimumNumberPeople=optimumNumberPeople
self.solucion = None
self.solucionUno = None
self.solucionDos = None
def getNumberBoxes(self):
return(self.numberBoxes)
def getVolumeBackpack(self):
return(self.volumeBackpack)
def getMaximumWeightBackpack(self):
return(self.maximumWeightBackpack)
def getDescriptionBoxes(self):
return(self.descriptionBoxes)
def getOptimumNumberPeople(self):
return(self.optimumNumberPeople)
def getDataTable(self):
return({"Box":[],"Item Number":self.listBoxNumber(self.descriptionBoxes),
"Item Volume":self.listBoxVolume(self.descriptionBoxes),
"Item Weight":self.listBoxWeight(self.descriptionBoxes)})
def listBoxNumber(self,dataList):
newDataList=[]
for i in range(len(dataList)):
newDataList.append((str)(dataList[i][0]))
return(newDataList)
def listBoxVolume(self,dataList):
newDataList=[]
for i in range(len(dataList)):
newDataList.append((str)(dataList[i][1]))
return(newDataList)
def listBoxWeight(self,dataList):
newDataList=[]
for i in range(len(dataList)):
newDataList.append((str)(dataList[i][2]))
return(newDataList)
def processingDataList(self,dataList):
print("input list: ",dataList,"\n")
numberBoxes=(int)(dataList.pop(0))
print("number boxes: ",numberBoxes,"\n")
propertiesBackpack=[]
cadena=""
for i in range(len(dataList[0])):
cadena+=dataList[0][i]
if((dataList[0][i]==" ")or(i==(len(dataList[0])-1))):
propertiesBackpack.append((float)(cadena))
cadena=""
print("properties backpack: ",propertiesBackpack,"\n")
dataList.pop(0)
string=""
descriptionBoxes=[]
for i in range(len(dataList)):
aux=[]
for j in range(len(dataList[i])):
string+=dataList[i][j]
if((dataList[i][j]==" ")or(j==(len(dataList[i])-1))):
aux.append((str)(string))
string=""
descriptionBoxes.append([(int)(aux[0]),(float)(aux[1]),(float)(aux[2])])
print("description boxes: ",descriptionBoxes,"\n")
self.numberBoxes=numberBoxes
self.volumeBackpack=propertiesBackpack[0]
self.maximumWeightBackpack=propertiesBackpack[1]
self.descriptionBoxes=descriptionBoxes
print("-> ", self.descriptionBoxes)
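# Illustrative sketch of the expected input (hypothetical values): the first entry
# is the number of boxes, the second is "volume weight" of the backpack, and every
# remaining entry is "id volume weight" for one box, e.g.
#   model.processingDataList(["3", "10.0 20.0", "1 2.5 4.0", "2 3.0 5.5", "3 1.5 2.0"])
# leaves numberBoxes=3, volumeBackpack=10.0, maximumWeightBackpack=20.0 and
# descriptionBoxes=[[1, 2.5, 4.0], [2, 3.0, 5.5], [3, 1.5, 2.0]]
# ("model" being any MainModel instance).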
##################################################
# This function computes the optimal number of people; in this sample case it would be 10
def calculateOptimalNumberPeople(self):
from .primeraModel import PrimeraParteModel
self.modeloSolucion = PrimeraParteModel(self.descriptionBoxes,self.volumeBackpack,self.maximumWeightBackpack)
self.solucionUno = self.modeloSolucion.getSolucion()
self.optimumNumberPeople = self.modeloSolucion.getNumPersonas()
# This function re-distributes the boxes evenly across the number of backpacks found above
def calculateEvenlyNumberPeople(self):
from .segundaModel import SegundaParteModel
n = int(self.solucionUno.getNumeroMochilas())
self.modeloSolucion = SegundaParteModel(self.descriptionBoxes,self.volumeBackpack,self.maximumWeightBackpack,n)
self.solucionDos = self.modeloSolucion.getSolucion()
self.optimumNumberPeople = self.modeloSolucion.getNumPersonas()
def getSolucionUno(self):
return self.solucionUno
def getSolucionDos(self):
return self.solucionDos
def haySolucionUno(self):
return self.solucionUno is not None
## This should live in the view, don't kill me pls :(
## TODO: draw these as stacked bars http://matplotlib.org/examples/pylab_examples/bar_stacked.html
def graficarSolucion(self,solucion):
import numpy as np
import matplotlib.pyplot as plt
listaVolumen = solucion.getVolumenesMaletas()
listaPesos = solucion.getPesosMaletas()
N = len(listaVolumen)
menMeans =listaVolumen
menStd = np.arange(N)
ind = np.arange(N)
width = 0.35
fig, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='b', yerr=menStd)
womenMeans = listaPesos
womenStd = np.arange(len(listaPesos))
rects2 = ax.bar(ind+width, womenMeans, width, color='r', yerr=womenStd)
ax.set_ylabel('Valores')
ax.set_title('Valores por volumen y peso')
ax.set_xticks(ind+width)
labelsMaletas = []
for i in range(N):
labelsMaletas.append("Maleta "+str(i))
ax.set_xticklabels( labelsMaletas )
ax.legend( (rects1[0], rects2[0]), ('Volumen', 'Peso') )
def autolabel(rects):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
ax.axhline(y=solucion.getMochila().getVolumen(),xmin=0,xmax=3,c="blue",linewidth=0.5,zorder=0)
ax.axhline(y=solucion.getMochila().getPeso(),xmin=0,xmax=3,c="red",linewidth=0.5,zorder=0)
autolabel(rects1)
autolabel(rects2)
#ax.plot(kind='barh', stacked=True)  # 'kind'/'stacked' are pandas DataFrame.plot arguments, not matplotlib Axes.plot ones
plt.show()
#============================================================================== | apache-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_inst/padova_inst_8/fullgrid/peaks_reader.py | 1 | 5306 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ---------------------------------------------------
print "Starting"
numFiles = 3
gridfile = [None]*numFiles
Elines = [None]*numFiles
for i in range(3):
for file in os.listdir('.'):
if file.endswith("padova_inst_{:d}.grd".format(i+1)):
gridfile[i] = file
print file
if file.endswith("padova_inst_{:d}.txt".format(i+1)):
Elines[i] = file
print file
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines[0], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines[1], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines[2], 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
headers = headers[1:]
# ---------------------------------------------------
# ---------------------------------------------------
#To fix when hdens > 10
hdens_values_2 = empty(shape=[0, 1])
phi_values_2 = empty(shape=[0, 1])
Emissionlines_2 = empty(shape=[0, len(Emissionlines[0,:])])
for i in range(len(hdens_values)):
if float(hdens_values[i]) < 10.100 :
hdens_values_2 = append(hdens_values_2, hdens_values[i])
phi_values_2 = append(phi_values_2, phi_values[i])
Emissionlines_2 = vstack([Emissionlines_2, Emissionlines[i,:]])
#overwrite old arrays
hdens_values = hdens_values_2
phi_values = phi_values_2
Emissionlines = Emissionlines_2
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
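# Illustrative check of the scaling above: a line with the same flux as the 4860 A
# reference (column 57, presumably H-beta) gets log10(4860*1) ~ 3.69, a line at one
# tenth of that flux gets log10(486) ~ 2.69, and ratios below 1/4860 would come out
# negative and are therefore left at zero.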
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "peaks pulled"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
savetxt('peaks', max_values, delimiter='\t')
print "peaks saved"
| gpl-2.0 |
hainm/statsmodels | examples/python/tsa_dates.py | 29 | 1169 |
## Dates in timeseries models
from __future__ import print_function
import statsmodels.api as sm
import pandas as pd
# ## Getting started
data = sm.datasets.sunspots.load()
# Right now an annual date series must be datetimes at the end of the year.
dates = sm.tsa.datetools.dates_from_range('1700', length=len(data.endog))
# ## Using Pandas
#
# Make a pandas TimeSeries or DataFrame
endog = pd.TimeSeries(data.endog, index=dates)
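# Note: pd.TimeSeries is simply an alias of pd.Series and was removed in later
# pandas releases; pd.Series(data.endog, index=dates) builds the same object there.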
# Instantiate the model
ar_model = sm.tsa.AR(endog, freq='A')
pandas_ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
# Out-of-sample prediction
pred = pandas_ar_res.predict(start='2005', end='2015')
print(pred)
# ## Using explicit dates
ar_model = sm.tsa.AR(data.endog, dates=dates, freq='A')
ar_res = ar_model.fit(maxlag=9, method='mle', disp=-1)
pred = ar_res.predict(start='2005', end='2015')
print(pred)
# This just returns a regular array, but since the model has date information attached, you can get the prediction dates in a roundabout way.
print(ar_res.data.predict_dates)
# Note: This attribute only exists if predict has been called. It holds the dates associated with the last call to predict.
| bsd-3-clause |
kcavagnolo/astroML | book_figures/chapter1/fig_moving_objects_multicolor.py | 3 | 4580 | """
SDSS Stripe 82 Moving Object Catalog
------------------------------------
Figure 1.12.
A multicolor scatter plot of the properties of asteroids from the SDSS Moving
Object Catalog (cf. figure 1.8). The left panel shows observational markers
of the chemical properties of the asteroids: two colors a* and i-z. The
right panel shows the orbital parameters: semimajor axis a vs. the sine of
the inclination. The color of points in the right panel reflects their
position in the left panel. This plot is similar to that used in
figures 3-4 of Parker et al 2008.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_moving_objects
from astroML.plotting.tools import devectorize_axes
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def black_bg_subplot(*args, **kwargs):
"""Create a subplot with black background"""
kwargs['axisbg'] = 'k'
ax = plt.subplot(*args, **kwargs)
# set ticks and labels to white
for spine in ax.spines.values():
spine.set_color('w')
for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
for child in tick.get_children():
child.set_color('w')
return ax
def compute_color(mag_a, mag_i, mag_z, a_crit=-0.1):
"""
Compute the scatter-plot color using code adapted from
TCL source used in Parker 2008.
"""
# define the base color scalings
R = np.ones_like(mag_i)
G = 0.5 * 10 ** (-2 * (mag_i - mag_z - 0.01))
B = 1.5 * 10 ** (-8 * (mag_a + 0.0))
# enhance green beyond the a_crit cutoff
G += 10. / (1 + np.exp((mag_a - a_crit) / 0.02))
# normalize color of each point to its maximum component
RGB = np.vstack([R, G, B])
RGB /= RGB.max(0)
# return an array of RGB colors, which is shape (n_points, 3)
return RGB.T
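# Illustrative usage sketch (hypothetical magnitudes): compute_color returns one
# RGB triple per object, each row scaled so that its largest channel equals 1:
#   rgb = compute_color(np.array([-0.2, 0.1]), np.array([16.0, 16.5]),
#                       np.array([15.9, 16.3]))
#   rgb.shape  # -> (2, 3)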
#------------------------------------------------------------
# Fetch data and extract the desired quantities
data = fetch_moving_objects(Parker2008_cuts=True)
mag_a = data['mag_a']
mag_i = data['mag_i']
mag_z = data['mag_z']
a = data['aprime']
sini = data['sin_iprime']
# dither: magnitudes are recorded only to +/- 0.01
np.random.seed(0)
mag_a += -0.005 + 0.01 * np.random.random(size=mag_a.shape)
mag_i += -0.005 + 0.01 * np.random.random(size=mag_i.shape)
mag_z += -0.005 + 0.01 * np.random.random(size=mag_z.shape)
# compute RGB color based on magnitudes
color = compute_color(mag_a, mag_i, mag_z)
#------------------------------------------------------------
# set up the plot
fig = plt.figure(figsize=(5, 2.2), facecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.3,
bottom=0.2, top=0.93)
# plot the color-magnitude plot
ax = black_bg_subplot(121)
ax.scatter(mag_a, mag_i - mag_z,
c=color, s=0.5, lw=0)
devectorize_axes(ax, dpi=400)
ax.plot([0, 0], [-0.8, 0.6], '--w', lw=1)
ax.plot([0, 0.4], [-0.15, -0.15], '--w', lw=1)
ax.set_xlim(-0.3, 0.4)
ax.set_ylim(-0.8, 0.6)
ax.set_xlabel(r'${\rm a*}$', color='w')
ax.set_ylabel(r'${\rm i-z}$', color='w')
# plot the orbital parameters plot
ax = black_bg_subplot(122)
ax.scatter(a, sini,
c=color, s=0.5, lw=0, edgecolor='none')
devectorize_axes(ax, dpi=400)
ax.plot([2.5, 2.5], [-0.02, 0.3], '--w', lw=1)
ax.plot([2.82, 2.82], [-0.02, 0.3], '--w', lw=1)
ax.set_xlim(2.0, 3.3)
ax.set_ylim(-0.02, 0.3)
ax.set_xlabel(r'${\rm a (AU)}$', color='w')
ax.set_ylabel(r'${\rm sin(i)}$', color='w')
# label the plot
text_kwargs = dict(color='w', transform=plt.gca().transAxes,
ha='center', va='bottom')
ax.text(0.25, 1.02, 'Inner', **text_kwargs)
ax.text(0.53, 1.02, 'Mid', **text_kwargs)
ax.text(0.83, 1.02, 'Outer', **text_kwargs)
# Saving the black-background figure requires some extra arguments:
#fig.savefig('moving_objects.png',
# facecolor='black',
# edgecolor='none')
plt.show()
| bsd-2-clause |
VictorCarlquist/clFFT | src/scripts/perf/plotPerformance.py | 11 | 12413 | # ########################################################################
# Copyright 2013 Advanced Micro Devices, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ########################################################################
# to use this script, you will need to download and install the 32-BIT VERSION of:
# - Python 2.7 x86 (32-bit) - http://www.python.org/download/releases/2.7.1
#
# you will also need the 32-BIT VERSIONS of the following packages as not all the packages are available in 64bit at the time of this writing
# The ActiveState python distribution is recommended for windows
# (make sure to get the python 2.7-compatible packages):
# - NumPy 1.5.1 (32-bit, 64-bit unofficial, supports Python 2.4 - 2.7 and 3.1 - 3.2.) - http://sourceforge.net/projects/numpy/files/NumPy/
# - matplotlib 1.0.1 (32-bit & 64-bit, supports Python 2.4 - 2.7) - http://sourceforge.net/projects/matplotlib/files/matplotlib/
#
# For ActiveState Python, all that one should need to type is 'pypm install matplotlib'
import datetime
import sys
import argparse
import subprocess
import itertools
import os
import matplotlib
import pylab
from matplotlib.backends.backend_pdf import PdfPages
from fftPerformanceTesting import *
def plotGraph(dataForAllPlots, title, plottype, plotkwargs, xaxislabel, yaxislabel):
"""
display a pretty graph
"""
dh.write('Making graph\n')
colors = ['k','y','m','c','b','r','g']
#plottype = 'plot'
for thisPlot in dataForAllPlots:
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata,
'{}.-'.format(colors.pop()),
label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
pylab.savefig(args.outputFilename,dpi=(1024/8))
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
######## plotFromDataFile() Function to plot from data file begins ########
def plotFromDataFile():
data = []
"""
read in table(s) from file(s)
"""
for thisFile in args.datafile:
if not os.path.isfile(thisFile):
print 'No file with the name \'{}\' exists. Please indicate another filename.'.format(thisFile)
quit()
results = open(thisFile, 'r')
resultsContents = results.read()
resultsContents = resultsContents.rstrip().split('\n')
firstRow = resultsContents.pop(0)
if firstRow != tableHeader:
print 'ERROR: input file \'{}\' does not match expected format.'.format(thisFile)
quit()
for row in resultsContents:
row = row.split(',')
row = TableRow(TestCombination(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]), row[10])
data.append(GraphPoint(row.parameters.x, row.parameters.y, row.parameters.z, row.parameters.batchsize, row.parameters.precision, row.parameters.device, row.parameters.label, row.gflops))
"""
data sanity check
"""
# if multiple plotvalues have > 1 value among the data rows, the user must specify which to plot
multiplePlotValues = []
for option in plotvalues:
values = []
for point in data:
values.append(getattr(point, option))
multiplePlotValues.append(len(set(values)) > 1)
if multiplePlotValues.count(True) > 1 and args.plot == None:
print 'ERROR: more than one parameter of {} has multiple values. Please specify which parameter to plot with --plot'.format(plotvalues)
quit()
# if args.graphxaxis is not 'problemsize', the user should know that the results might be strange
if args.graphxaxis != 'problemsize':
xaxisvalueSet = []
for option in xaxisvalues:
if option != 'problemsize':
values = []
for point in data:
values.append(getattr(point, option))
xaxisvalueSet.append(len(set(values)) > 1)
if xaxisvalueSet.count(True) > 1:
print 'WARNING: more than one parameter of {} is varied. unexpected results may occur. please double check your graphs for accuracy.'.format(xaxisvalues)
# multiple rows should not have the same input values
pointInputs = []
for point in data:
pointInputs.append(point.__str__().split(';')[0])
if len(set(pointInputs)) != len(data):
print 'ERROR: imported table has duplicate rows with identical input parameters'
quit()
"""
figure out if we have multiple plots on this graph (and what they should be)
"""
if args.plot != None:
multiplePlots = args.plot
elif multiplePlotValues.count(True) == 1:
multiplePlots = plotvalues[multiplePlotValues.index(True)]
else:
# default to device if none of the options to plot have multiple values
multiplePlots = 'device'
"""
assemble data for the graphs
"""
data.sort(key=lambda row: int(getattr(row, args.graphxaxis)))
# choose scale for x axis
if args.xaxisscale == None:
# user didn't specify. autodetect
if int(getattr(data[len(data)-1], args.graphxaxis)) > 10000: # bigger numbers on x-axis
args.xaxisscale = 'log10'
elif int(getattr(data[len(data)-1], args.graphxaxis)) > 2000: # big numbers on x-axis
args.xaxisscale = 'log2'
else: # small numbers on x-axis
args.xaxisscale = 'linear'
if args.yaxisscale == None:
args.yaxisscale = 'linear'
plotkwargs = {}
if args.xaxisscale == 'linear':
plottype = 'plot'
elif args.xaxisscale == 'log2':
plottype = 'semilogx'
if (args.yaxisscale=='log2'):
plottype = 'loglog'
plotkwargs = {'basex':2,'basey':2}
elif (args.yaxisscale=='log10'):
plottype = 'loglog'
plotkwargs = {'basex':2,'basey':10}
elif (args.yaxisscale=='linear'):
plottype = 'semilogx'
plotkwargs = {'basex':2}
elif args.xaxisscale == 'log10':
plottype = 'semilogx'
if (args.yaxisscale=='log2'):
plottype = 'loglog'
plotkwargs = {'basex':10,'basey':2}
elif (args.yaxisscale=='log10'):
plottype = 'loglog'
plotkwargs = {'basex':10,'basey':10}
else:
print 'ERROR: invalid value for x-axis scale'
quit()
plots = set(getattr(row, multiplePlots) for row in data)
class DataForOnePlot:
def __init__(self, inlabel, inxdata, inydata):
self.label = inlabel
self.xdata = inxdata
self.ydata = inydata
dataForAllPlots=[]
for plot in plots:
dataForThisPlot = itertools.ifilter( lambda x: getattr(x, multiplePlots) == plot, data)
dataForThisPlot = list(itertools.islice(dataForThisPlot, None))
if args.graphxaxis == 'problemsize':
xdata = [int(row.x) * int(row.y) * int(row.z) * int(row.batchsize) for row in dataForThisPlot]
else:
xdata = [getattr(row, args.graphxaxis) for row in dataForThisPlot]
ydata = [getattr(row, args.graphyaxis) for row in dataForThisPlot]
dataForAllPlots.append(DataForOnePlot(plot,xdata,ydata))
"""
assemble labels for the graph or use the user-specified ones
"""
if args.graphtitle:
# use the user selection
title = args.graphtitle
else:
# autogen a lovely title
title = 'Performance vs. ' + args.graphxaxis.capitalize()
if args.xaxislabel:
# use the user selection
xaxislabel = args.xaxislabel
else:
# autogen a lovely x-axis label
if args.graphxaxis == 'cachesize':
units = '(bytes)'
else:
units = '(datapoints)'
xaxislabel = args.graphxaxis + ' ' + units
if args.yaxislabel:
# use the user selection
yaxislabel = args.yaxislabel
else:
# autogen a lovely y-axis label
if args.graphyaxis == 'gflops':
units = 'GFLOPS'
yaxislabel = 'Performance (' + units + ')'
"""
display a pretty graph
"""
colors = ['k','y','m','c','b','g','r']
def getkey(item):
return str(item.label)
dataForAllPlots.sort(key=getkey)
#for thisPlot in sorted(dataForAllPlots,key=getkey):
for thisPlot in sorted(dataForAllPlots,key=getkey):
getattr(pylab, plottype)(thisPlot.xdata, thisPlot.ydata, '{}.-'.format(colors.pop()), label=thisPlot.label, **plotkwargs)
if len(dataForAllPlots) > 1:
pylab.legend(loc='best')
pylab.title(title)
pylab.xlabel(xaxislabel)
pylab.ylabel(yaxislabel)
pylab.grid(True)
if args.outputFilename == None:
# if no pdf output is requested, spit the graph to the screen . . .
pylab.show()
else:
# . . . otherwise, gimme gimme pdf
#pdf = PdfPages(args.outputFilename)
#pdf.savefig()
#pdf.close()
pylab.savefig(args.outputFilename,dpi=(1024/8))
######### plotFromDataFile() Function to plot from data file ends #########
######## "main" program begins #####
"""
define and parse parameters
"""
xaxisvalues = ['x','y','z','batchsize','problemsize']
yaxisvalues = ['gflops']
plotvalues = ['device', 'precision', 'label']
parser = argparse.ArgumentParser(description='Plot performance of the clfft\
library. clfft.plotPerformance.py reads in data tables from clfft.\
measurePerformance.py and plots their values')
fileOrDb = parser.add_mutually_exclusive_group(required=True)
fileOrDb.add_argument('-d', '--datafile',
dest='datafile', action='append', default=None, required=False,
help='indicate a file to use as input. must be in the format output by\
clfft.measurePerformance.py. may be used multiple times to indicate\
multiple input files. e.g., -d cypressOutput.txt -d caymanOutput.txt')
parser.add_argument('-x', '--x_axis',
dest='graphxaxis', default=None, choices=xaxisvalues, required=True,
help='indicate which value will be represented on the x axis. problemsize\
is defined as x*y*z*batchsize')
parser.add_argument('-y', '--y_axis',
dest='graphyaxis', default='gflops', choices=yaxisvalues,
help='indicate which value will be represented on the y axis')
parser.add_argument('--plot',
dest='plot', default=None, choices=plotvalues,
help='indicate which of {} should be used to differentiate multiple plots.\
this will be chosen automatically if not specified'.format(plotvalues))
parser.add_argument('--title',
dest='graphtitle', default=None,
help='the desired title for the graph generated by this execution. if\
GRAPHTITLE contains any spaces, it must be entered in \"double quotes\".\
if this option is not specified, the title will be autogenerated')
parser.add_argument('--x_axis_label',
dest='xaxislabel', default=None,
help='the desired label for the graph\'s x-axis. if XAXISLABEL contains\
any spaces, it must be entered in \"double quotes\". if this option\
is not specified, the x-axis label will be autogenerated')
parser.add_argument('--x_axis_scale',
dest='xaxisscale', default=None, choices=['linear','log2','log10'],
help='the desired scale for the graph\'s x-axis. if nothing is specified,\
it will be selected automatically')
parser.add_argument('--y_axis_scale',
dest='yaxisscale', default=None, choices=['linear','log2','log10'],
help='the desired scale for the graph\'s y-axis. if nothing is specified,\
linear will be selected')
parser.add_argument('--y_axis_label',
dest='yaxislabel', default=None,
help='the desired label for the graph\'s y-axis. if YAXISLABEL contains any\
spaces, it must be entered in \"double quotes\". if this option is not\
specified, the y-axis label will be autogenerated')
parser.add_argument('--outputfile',
dest='outputFilename', default=None,
help='name of the file to output graphs. Supported formats: emf, eps, pdf, png, ps, raw, rgba, svg, svgz.')
args = parser.parse_args()
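# Example invocation (illustrative; the data file name is a placeholder):
#   python plotPerformance.py -d cypressOutput.txt -x problemsize -y gflops --outputfile perf.pdf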
if args.datafile != None:
plotFromDataFile()
else:
print "Atleast specify if you want to use text files or database for plotting graphs. Use -h or --help option for more details"
quit()
| apache-2.0 |
cowlicks/numpy | numpy/lib/twodim_base.py | 83 | 26903 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
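# For example, _min_int(0, 300) returns int16: 300 exceeds the int8 range
# (-128..127) but fits within int16 (-32768..32767).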
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print H[::-1] # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| bsd-3-clause |
finfou/tushare | tushare/stock/newsevent.py | 38 | 6714 | # -*- coding:utf-8 -*-
"""
News and event data interface
Created on 2015/02/07
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from tushare.stock import cons as ct
from tushare.stock import news_vars as nv
import pandas as pd
from datetime import datetime
import lxml.html
from lxml import etree
import re
import json
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_latest_news(top=None, show_content=False):
"""
Fetch the latest real-time financial news.
Parameters
--------
top: integer, number of latest news items to return; 80 by default
show_content: whether to include the article body; False by default
Return
--------
DataFrame
classify :news category
title :news title
time :publication time
url :link to the article
content:article body (present only when show_content is True)
"""
top = ct.PAGE_NUM[2] if top is None else top
try:
request = Request(nv.LATEST_URL % (ct.P_TYPE['http'], ct.DOMAINS['sina'],
ct.PAGES['lnews'], top,
_random()))
data_str = urlopen(request, timeout=10).read()
data_str = data_str.decode('GBK')
data_str = data_str.split('=')[1][:-1]
data_str = eval(data_str, type('Dummy', (dict,),
dict(__getitem__ = lambda s, n:n))())
data_str = json.dumps(data_str)
data_str = json.loads(data_str)
data_str = data_str['list']
data = []
for r in data_str:
rt = datetime.fromtimestamp(r['time'])
rtstr = datetime.strftime(rt, "%m-%d %H:%M")
arow = [r['channel']['title'], r['title'], rtstr, r['url']]
if show_content:
arow.append(latest_content(r['url']))
data.append(arow)
df = pd.DataFrame(data, columns=nv.LATEST_COLS_C if show_content else nv.LATEST_COLS)
return df
except Exception as er:
print(str(er))
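# Illustrative call (assuming the column names documented above): fetch the five
# most recent headlines without article bodies and inspect the titles:
#   df = get_latest_news(top=5, show_content=False)
#   print(df['title'])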
def latest_content(url):
'''
Fetch the text body of a real-time financial news article.
Parameter
--------
url: link to the article
Return
--------
string: the text content of the article
'''
try:
html = lxml.html.parse(url)
res = html.xpath('//div[@id=\"artibody\"]/p')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr).replace(' ', '')#.replace('\n\n', '\n').
html_content = lxml.html.fromstring(sarr)
content = html_content.text_content()
return content
except Exception as er:
print(str(er))
def get_notices(code=None, date=None):
'''
Per-stock announcement alerts ("information landmines").
Parameters
--------
code: stock code
date: announcement publication date
Return
--------
DataFrame with columns:
title: announcement title
type: announcement type
date: announcement date
url: URL of the announcement content
'''
if code is None:
return None
symbol = 'sh' + code if code[:1] == '6' else 'sz' + code
url = nv.NOTICE_INFO_URL%(ct.P_TYPE['http'], ct.DOMAINS['vsf'],
ct.PAGES['ntinfo'], symbol)
url = url if date is None else '%s&gg_date=%s'%(url, date)
html = lxml.html.parse(url)
res = html.xpath('//table[@class=\"body_table\"]/tbody/tr')
data = []
for td in res:
title = td.xpath('th/a/text()')[0]
type = td.xpath('td[1]/text()')[0]
date = td.xpath('td[2]/text()')[0]
url = '%s%s%s'%(ct.P_TYPE['http'], ct.DOMAINS['vsf'], td.xpath('th/a/@href')[0])
data.append([title, type, date, url])
df = pd.DataFrame(data, columns=nv.NOTICE_INFO_CLS)
return df
def notice_content(url):
'''
Fetch the body of an announcement alert.
Parameter
--------
url: link to the content
Return
--------
string: the announcement text
'''
try:
html = lxml.html.parse(url)
res = html.xpath('//div[@id=\"content\"]/pre/text()')[0]
return res.strip()
except Exception as er:
print(str(er))
def guba_sina(show_content=False):
"""
Fetch the highlighted posts from the front page of the Sina Finance stock forum (guba).
Parameter
--------
show_content: whether to include the post body; False by default
Return
--------
DataFrame
title, post title
content, post body (when show_content=True)
ptime, publication time
rcounts, read count
"""
from pandas.io.common import urlopen
try:
with urlopen(nv.GUBA_SINA_URL%(ct.P_TYPE['http'],
ct.DOMAINS['sina'])) as resp:
lines = resp.read()
html = lxml.html.document_fromstring(lines)
res = html.xpath('//ul[@class=\"list_05\"]/li')
heads = html.xpath('//div[@class=\"tit_04\"]')
data = []
for head in heads[:1]:
title = head.xpath('a/text()')[0]
url = head.xpath('a/@href')[0]
ds = [title]
ds.extend(_guba_content(url))
data.append(ds)
for row in res:
title = row.xpath('a[2]/text()')[0]
url = row.xpath('a[2]/@href')[0]
ds = [title]
ds.extend(_guba_content(url))
data.append(ds)
df = pd.DataFrame(data, columns=nv.GUBA_SINA_COLS)
df['rcounts'] = df['rcounts'].astype(float)
return df if show_content is True else df.drop('content', axis=1)
except Exception as er:
print(str(er))
def _guba_content(url):
try:
html = lxml.html.parse(url)
res = html.xpath('//div[@class=\"ilt_p\"]/p')
if ct.PY3:
sarr = [etree.tostring(node).decode('utf-8') for node in res]
else:
sarr = [etree.tostring(node) for node in res]
sarr = ''.join(sarr).replace(' ', '')#.replace('\n\n', '\n').
html_content = lxml.html.fromstring(sarr)
content = html_content.text_content()
ptime = html.xpath('//div[@class=\"fl_left iltp_time\"]/span/text()')[0]
rcounts = html.xpath('//div[@class=\"fl_right iltp_span\"]/span[2]/text()')[0]
reg = re.compile(r'\((.*?)\)')
rcounts = reg.findall(rcounts)[0]
return [content, ptime, rcounts]
except Exception:
return ['', '', '0']
def _random(n=16):
from random import randint
start = 10 ** (n - 1)
end = (10 ** n) - 1
return str(randint(start, end))
| bsd-3-clause |
jordipons/EUSIPCO2017 | src/patches.py | 1 | 9011 | import pickle
import glob
import numpy as np
import os
import pandas as pd
import json
from sklearn.preprocessing import StandardScaler
import common
import time
"""
patches.py: computes the patches and normalizes the data.
Requires the previous run of 'exp_setup.py'.
The results and parameters of this script are stored in common.DATA_FOLDER/patches/
Step 3/5 of the pipeline.
NOT a deterministic experiment: random sampling and randomized training examples.
"""
config = {
'patches_code_version': 'elementWise_memory',
'setup' : 'dieleman_setup_eusipco2017', #experimental setup name in 'exp_setup.py'
'n_frames': '', # if '', compute n_frames from 'window', SET AS INT otherwise!
'window' : 3, # in seconds
'spec_processing' : 'logC', #'squared_log' or 'logC'
'normalization' : 'element_wise', #'global' or 'element_wise'
'mean_var_type' : 'memory', # 'calculus' or 'memory'
'mode_sampling' : 'overlap_sampling',
'param_sampling' : 187 #if 'overlap_sampling': param_sampling=hop_size
} #if 'random': param_sampling=number of samples
def sample_spec(spec,n_frames,mode_sampling,param_sampling):
'''
spec: input spectrogram to sample from
n_frames: length of the sample
mode_sampling: 'overlap_sampling' or 'random'
param_sampling: if 'overlap_sampling': param_sampling=hop_size
if 'random': param_sampling=number of samples
'''
if mode_sampling=='overlap_sampling':
for i in xrange(0,(int((spec.shape[0]-int(n_frames))/param_sampling)*param_sampling)+1,param_sampling):
yield spec[i:i+int(n_frames)]
else: # random sampling
for i in range(0,param_sampling):
r_idx = np.random.randint(0, high=int(spec.shape[0]) - int(n_frames) + 1)
yield spec[r_idx:r_idx + int(n_frames)]
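# Illustrative sketch (hypothetical sizes): for a 600-frame spectrogram with
# n_frames=187 and 'overlap_sampling' with param_sampling=187 (hop size), the
# generator yields int((600-187)/187)+1 = 3 non-overlapping patches starting at
# frames 0, 187 and 374, each 187 frames long:
#   patches = list(sample_spec(spec, 187, 'overlap_sampling', 187))
#   len(patches)  # -> 3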
def create_patches(set_file,set_type):
# Create patches folder for 'set_type': train, test, val.
patches_folder = common.DATA_FOLDER+config["patches_folder"] + set_type + "/"
if not os.path.exists(patches_folder):
os.makedirs(patches_folder)
# Get exp_setup partition: train, test, val.
df_items = pd.read_table(common.DATA_FOLDER+set_file, usecols=[0, 1, 2], names=['id', 'path', 'gt'])
# Create patches from spectrograms
n = 0
fw = open(patches_folder + "all_patches_ids.tsv","w") # create indexing file where all patches are saved
for index, row in df_items.iterrows():
# load spectrogram
spec = pickle.load(open(common.DATA_FOLDER+row['path']))
# normalize amplitude
# transpose to x,y = NxM instead of MxN.
if config['spec_processing'] == 'squared_log':
spec = common.logamplitude(np.abs(spec) ** 2).T#,ref_power=np.max)!!!!!!!!
elif config['spec_processing'] == 'logC':
spec = np.log10(10000*spec+1).T
# save
if int(spec.shape[0]) >= int(config['n_frames']):
sample_counter=0
for sample in sample_spec(spec,int(config['n_frames']),config['mode_sampling'],config['param_sampling']):
try:
patch_path=config["patches_folder"] + set_type + "/"+row['path'].split("/")[-2]+"/"
if not os.path.exists(common.DATA_FOLDER+patch_path):
os.makedirs(common.DATA_FOLDER+patch_path)
patch_path=patch_path+row['path'][row['path'].rfind("/")+1:row['path'].rfind(".")]+'_'+str(sample_counter)+'.npy'
fw.write("%s\t%s\t%s\t%s\n" % (row['id'],row['path'],sample_counter,patch_path)) # id, spectro_path, sample_counter, patch_path
# patches have NxM dimensions.
np.save(common.DATA_FOLDER+patch_path, np.asarray(sample)) #!!!!!!! asarray????
sample_counter=sample_counter+1
except Exception,e:
print str(e)
n+=1 # n is for tracking progress
if n%100==0:
print n
return patches_folder
def get_scaler(folder):
if config['mean_var_type']=='memory':
return get_scaler_memory(folder)
elif config['mean_var_type']=='calculus' and config['normalization']=='global':
return get_scaler_calculus(folder)
else:
print "ERROR: set 'normalization' or 'mean_var_type' correctly."
def get_scaler_calculus(folder):
total_mu=0
total_var=0
total_l=0
# Load patches
df_items = pd.read_table(folder+'all_patches_ids.tsv', usecols=[0, 1, 2, 3], names=['id', 'spectro_path', 'sample_count', 'patch_path'])
for index, row in df_items.iterrows():
if index%100==0:
print str(index+1)+'/'+str(df_items.shape[0])
patch_file=common.DATA_FOLDER+row['patch_path']
patch = np.load(patch_file)
# Normalize patches according to scaler
patch=patch.reshape(patch.shape[0] * patch.shape[1])
mu=np.mean(patch)
v=np.var(patch)
l=len(patch)
        a=l*((mu*mu)+v)
        b=total_l*((total_mu*total_mu)+total_var) # second moment of the running pool, using its previous mean
        new_total_mu=(l*mu+total_l*total_mu)/(l+total_l)
        c=(a+b)/(l+total_l)
        total_var=c-(new_total_mu*new_total_mu)
        total_mu=new_total_mu
        total_l=l+total_l
return [total_mu,total_var]
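# Illustrative check (not part of the original pipeline): verifies the incremental
# pooled mean/variance update used above against NumPy on toy data; the chunk
# sizes are arbitrary assumptions.
def _check_pooled_stats():
    chunks = [np.random.rand(n) for n in (10, 25, 7)]
    mu, var, length = 0.0, 0.0, 0
    for chunk in chunks:
        m, v, l = np.mean(chunk), np.var(chunk), len(chunk)
        new_mu = (l * m + length * mu) / (l + length)
        second_moment = (l * (m * m + v) + length * (mu * mu + var)) / (l + length)
        var = second_moment - new_mu * new_mu
        mu, length = new_mu, l + length
    full = np.concatenate(chunks)
    assert np.allclose([mu, var], [full.mean(), full.var()])
    return mu, var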
def get_scaler_memory(folder):
# Load all patches for computing a BIASED estimation of the mean and var
df_items = pd.read_table(folder+'all_patches_ids.tsv', usecols=[0, 1, 2, 3], names=['id', 'spectro_path', 'sample_count', 'patch_path'])
counter=0
for index, row in df_items.iterrows():
if index%100==0:
print str(index+1)+'/'+str(df_items.shape[0])
patch_file=common.DATA_FOLDER+row['patch_path']
if counter==0:
patches=np.load(patch_file)
counter=counter+1
else:
patches=np.concatenate((patches,np.load(patch_file)),axis=0)
counter=counter+1
if counter==2000:
break
if config['normalization']=='global':
patches=patches.reshape(patches.shape[0] * patches.shape[1])
scaler = StandardScaler()
scaler.fit(patches)
## Check numerically mean/std normalization ##
t_patches=scaler.transform(patches)
print '\t- Mean @ visible patches: '+str(t_patches.mean())
print '\t- Std @ visible patches: '+str(t_patches.std())
print '\t- Var @ visible patches: '+str(t_patches.var())
return scaler
def save_normalized_patches(folder, scaler):
df_items = pd.read_table(folder+'all_patches_ids.tsv', usecols=[0, 1, 2, 3], names=['id', 'spectro_path', 'sample_count', 'patch_path'])
for index, row in df_items.iterrows():
patch_file=common.DATA_FOLDER+row['patch_path']
# Load patch
patch = np.load(patch_file)
shape = patch.shape
if config['normalization']=='global':
patch=patch.reshape(patch.shape[0] * patch.shape[1])
# Normalize patches according to scaler
if config['mean_var_type']=='memory':
patch=scaler.transform(patch)
elif config['mean_var_type']=='calculus':
patch = (patch-scaler[0])/scaler[1]
patch = patch.reshape(1,1,shape[0],shape[1])
np.save(patch_file, patch) # save patch with dimensions (1,1,N,M)
if __name__ == '__main__':
# Load parameters from previous processing steps: 'spectrograms.py' and 'exp_setup.py'.
params = json.load(open(common.DATA_FOLDER+"exp_setup/%s/params.json" % config["setup"]))
config['spectro_params'] = params['spectro_params']
config['setup_params'] = params
config['setup_params'].pop('spectro_params',None) # remove repeated variables
# Set patch parameters
if config['n_frames'] == '':
config['n_frames'] = int(config['window'] * config['spectro_params']['resample_sr'] / float(config['spectro_params']['hop']))
print 'Number of frames: '+str(config['n_frames'])
config['patches_folder'] = 'patches/patches_%s_%s_%s_%s_%s/' % (config['setup'],config['n_frames'],config['spec_processing'],config['patches_code_version'],int(time.time()))
config['xInput']=config['n_frames']
if config['spectro_params']['spectrogram_type']=='cqt':
config['yInput']=config['spectro_params']['cqt_bins']
elif config['spectro_params']['spectrogram_type']=='mel':
config['yInput']=config['spectro_params']['n_mels']
print config
print '- Generating training set..'
patches_folder_train = create_patches(config['setup_params']['setup_folder']+"trainset.tsv","train")
scaler = get_scaler(patches_folder_train) # compute scaler in training data
#if config['mean_var_type']=='memory':
# print '\tScalar-mean: '+str(scaler.mean_)
# print '\tScalar-var: '+str(scaler.var_)
#elif config['mean_var_type']=='calculus':
# print '\tScalar-mean: '+str(scaler[0])
# print '\tScalar-var: '+str(scaler[1])
save_normalized_patches(patches_folder_train, scaler)
print '- Generating validation set..'
patches_folder_val = create_patches(config['setup_params']['setup_folder']+"valset.tsv","val")
save_normalized_patches(patches_folder_val, scaler)
print '- Generating test set..'
patches_folder_test = create_patches(config['setup_params']['setup_folder']+"testset.tsv","test")
save_normalized_patches(patches_folder_test, scaler)
# Save scaler and parameters
json.dump(config, open(common.DATA_FOLDER+config["patches_folder"]+"params.json","w"))
pickle.dump(scaler,open(common.DATA_FOLDER+config["patches_folder"]+"scaler.pk",'wb'))
print 'Patches folder: '+str(config['patches_folder'])
# DOUBTS
## std? var? how to compute?
## ref_power=np.max!
## Transpose in:
# if config['spec_processing'] == 'squared_log':
# spec = common.logamplitude(np.abs(spec) ** 2).T#,ref_power=np.max)!!!!!!!!
# elif config['spec_processing'] == 'logC':
# spec = np.log10(10000*spec+1).T
| mit |
JosmanPS/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 248 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing an MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
bitemyapp/ggplot | ggplot/tests/test_stat_calculate_methods.py | 12 | 2240 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from nose.tools import (assert_equal, assert_is, assert_is_not,
assert_raises)
import pandas as pd
from ggplot import *
from ggplot.utils.exceptions import GgplotError
from . import cleanup
@cleanup
def test_stat_bin():
# stat_bin needs the 'x' aesthetic to be numeric or a categorical
# and should complain if given anything else
class unknown(object):
pass
x = [unknown()] * 3
y = [1, 2, 3]
df = pd.DataFrame({'x': x, 'y': y})
gg = ggplot(aes(x='x', y='y'), df)
with assert_raises(GgplotError):
print(gg + stat_bin())
@cleanup
def test_stat_abline():
# slope and intercept function should return values
# of the same length
def fn_xy(x, y):
return [1, 2]
def fn_xy2(x, y):
return [1, 2, 3]
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# same length, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy2))
@cleanup
def test_stat_vhabline_functions():
def fn_x(x):
return 1
def fn_y(y):
return 1
def fn_xy(x, y):
return 1
gg = ggplot(aes(x='wt'), mtcars)
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy))
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(intercept=fn_xy))
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# Functions with 2 args, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
# slope function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_x, intercept=fn_xy))
# intercept function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_y))
    # xintercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_vline(xintercept=fn_xy))
    # yintercept function should take 1 arg
with assert_raises(GgplotError):
print(gg + stat_hline(yintercept=fn_xy))
| bsd-2-clause |
jkarnows/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
peastman/msmbuilder | msmbuilder/hmm/discrete_approx.py | 12 | 6593 | """Discrete approximations to continuous distributions"""
# Author: Robert McGibbon <[email protected]>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, division, absolute_import
import numpy as np
import scipy.misc
import scipy.linalg
import scipy.optimize
from mdtraj.utils import ensure_type
__all__ = ['discrete_approx_mvn', 'NotSatisfiableError']
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class NotSatisfiableError(Exception):
pass
def discrete_approx_mvn(X, means, covars, match_variances=True):
"""Find a discrete approximation to a multivariate normal distribution.
    The method finds the discrete distribution with support only at the
    supplied points X that has minimal K-L divergence to a target multivariate
    normal distribution, under the constraints that the mean and variance
    of the discrete distribution match those of the normal distribution exactly.
Parameters
----------
X : np.ndarray, shape=(n_points, n_features)
The allowable points
means : np.ndarray, shape=(n_features)
The mean vector of the MVN
covars : np.ndarray, shape=(n_features, n_features) or shape=(n_features,)
If covars is 2D, it's interpreted as the covariance matrix for
the model. If 1D, we assume a diagonal covariance matrix with the
specified diagonal entries.
    match_variances : bool, optional
        When True, both the means and the variances of the discrete distribution
        are constrained. Under some circumstances, this is not satisfiable (e.g.
        if there aren't enough samples).
Returns
-------
weights : np.ndarray, shape=(n_samples,)
The weight for each of the points in X in the resulting
discrete probability distribution
Notes
-----
The discrete distribution is one that has mass only at the specified
points. It can therefore be parameterized by a set of weights on each
point. If :math:`\{X_i\}` is the set of allowable points, and
:math:`\{w_i\}` are the weights, then our discrete distribution has
the form
.. math::
        p(y; w) = \sum_i w_i \delta(y - X_i).
    We choose the :math:`w_i` by minimizing the K-L divergence from our
discrete distribution to the desired multivariate normal subject to a
constraint that the first moments of the discrete distribution match
the mean of the multivariate normal exactly, and that the variances
also match. Let :math:`q(x)` be the target distribution. The optimal
weights are then
.. math::
min_{\{w_i\}} \sum_i p(X_i; w) \log \frac{p(X_i; w)}{q(X_i)}
subject to
.. math::
        \sum_i X_i \, p(X_i; w) = \int_\Omega x \, q(x) \, dx = \mu,
        \sum_i (X_i - \mu)^2 \, p(X_i; w) = \int_\Omega (x - \mu)^2 \, q(x) \, dx.
References
----------
.. [1] Tanaka, Ken'ichiro, and Alexis Akira Toda. "Discrete approximations
of continuous distributions by maximum entropy." Economics Letters 118.3
(2013): 445-450.
"""
X = ensure_type(np.asarray(X), dtype=np.float32, ndim=2, name='X', warn_on_cast=False)
means = ensure_type(np.asarray(means), np.float64, ndim=1, name='means', warn_on_cast=False)
covars = np.asarray(covars)
# Get the un-normalized probability of each point X_i in the MVN
# `prob` are the q(X_i) in the mathematics
# `moments` are the \bar{T} that we want to match.
if covars.ndim == 1:
# diagonal covariance case
if not len(covars) == len(means):
            raise ValueError('Shape Error: covars and means must have the same length')
prob = np.exp(-0.5 * np.sum(1. / np.sqrt(covars) * (X - means) ** 2, axis=1))
moments = np.concatenate((means, covars)) if match_variances else means
elif covars.ndim == 2:
if not (covars.shape[0] == len(means) and covars.shape[1] == len(means)):
raise ValueError('Shape Error: covars must be square, with size = len(means)')
# full 2d covariance matrix
cv_chol = scipy.linalg.cholesky(covars, lower=True)
cv_sol = scipy.linalg.solve_triangular(cv_chol, (X - means).T, lower=True).T
prob = np.exp(-0.5 * (np.sum(cv_sol ** 2, axis=1)))
moments = np.concatenate((means, np.diag(covars))) if match_variances else means
else:
raise ValueError('covars must be 1D or 2D')
# this is T(x_i) for each X_i
moment_contributions = np.hstack((X, (X - means) ** 2)) if match_variances else X
def objective_and_grad(l):
dot = np.dot(moment_contributions, l)
lse = scipy.misc.logsumexp(dot, b=prob)
# value of the objective function
obj_value = lse - np.dot(l, moments)
# gradient of objective function
dot_max = dot.max(axis=0)
exp_term = np.sum(moment_contributions * (prob * np.exp(dot - dot_max)).reshape(-1, 1), axis=0)
log_numerator = np.log(exp_term) + dot_max
grad_value = np.exp(log_numerator - lse) - moments
return obj_value, grad_value
result = scipy.optimize.minimize(
objective_and_grad, jac=True, x0=np.ones_like(moments), method='BFGS')
if not result['success']:
raise NotSatisfiableError()
dot = np.dot(moment_contributions, result['x'])
log_denominator = scipy.misc.logsumexp(dot, b=prob)
weights = prob * np.exp(dot - log_denominator)
if not np.all(np.isfinite(weights)):
raise NotSatisfiableError()
weights = weights / np.sum(weights)
return weights
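# Illustrative sketch (not part of the original module): checks that the optimized
# weights reproduce the requested first and second moments on randomly drawn
# support points. The number of points, their range, and the target moments are
# assumptions for demonstration only.
def _demo_moment_matching():
    rng = np.random.RandomState(42)
    X = rng.uniform(low=-6, high=6, size=(200, 1))
    weights = discrete_approx_mvn(X, means=[0.0], covars=[1.5])
    mean = np.dot(weights, X[:, 0])
    var = np.dot(weights, (X[:, 0] - 0.0) ** 2)
    return mean, var  # should come out close to 0.0 and 1.5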
if __name__ == '__main__':
np.random.seed(10)
import matplotlib.pyplot as pp
length = 100
X = np.random.uniform(low=-5, high=5, size=(length, 1))
weights = discrete_approx_mvn(X, [0], [2])
pp.title('dot(weights, X) = %.5f, dot(weights, X**2)=%f' %
(np.dot(weights, X), np.dot(weights, X ** 2)))
for i in range(length):
pp.plot([X[i, 0], X[i, 0]], [0, weights[i]])
pp.figure()
X = np.random.uniform(low=-2, high=2, size=(length, 1))
weights = discrete_approx_mvn(X, [0], [1])
pp.title('dot(weights, X) = %.5f, dot(weights, X**2)=%f' %
(np.dot(weights, X), np.dot(weights, X ** 2)))
for i in range(length):
pp.plot([X[i, 0], X[i, 0]], [0, weights[i]])
pp.show()
| lgpl-2.1 |
drakipovic/deep-learning | 1. labos/fcann.py | 1 | 1825 | import numpy as np
from sklearn.preprocessing import OneHotEncoder
import matplotlib.pyplot as plt
from data import sample_gmm_2d, eval_perf_binary, graph_data, graph_surface
def forward(X, w_1, w_2, b_1, b_2):
s_1 = np.dot(X, w_1) + b_1
h_1 = np.maximum(s_1, np.zeros(s_1.shape))
s_2 = np.dot(h_1, w_2) + b_2
es = np.exp(s_2)
probs = es / np.sum(es, axis=1, keepdims=True)
return probs, h_1
def fcann_train(X, y, C, iterations=10000, delta=0.003, l=1e-3, hl_size=5):
    # weights of the first layer
w_1 = np.random.randn(X.shape[1], hl_size)
b_1 = np.random.randn(1, hl_size)
    # weights of the second layer
w_2 = np.random.randn(hl_size, C)
b_2 = np.random.randn(1, C)
for i in range(iterations):
probs, h_1 = forward(X, w_1, w_2, b_1, b_2)
gs2 = probs - y
grad_w2 = np.dot(h_1.T, gs2)
grad_b2 = np.sum(gs2, axis=0)
gh1 = np.dot(gs2, w_2.T)
gs1 = gh1 * (h_1 > 0)
grad_w1 = np.dot(X.T, gs1)
grad_b1 = np.sum(gs1, axis=0)
w_1 += -delta * grad_w1
w_2 += -delta * grad_w2
b_1 += -delta * grad_b1
b_2 += -delta * grad_b2
return w_1, w_2, b_1, b_2
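# Illustrative check (not part of the original exercise): runs the forward pass
# with random weights and confirms the softmax output is a valid probability
# distribution per row and the hidden layer is ReLU-rectified. All sizes below
# are arbitrary assumptions.
def _sanity_check_forward():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 2)
    w_1, b_1 = rng.randn(2, 5), rng.randn(1, 5)
    w_2, b_2 = rng.randn(5, 3), rng.randn(1, 3)
    probs, h_1 = forward(X, w_1, w_2, b_1, b_2)
    assert probs.shape == (20, 3)
    assert np.allclose(probs.sum(axis=1), 1.0)
    assert np.all(h_1 >= 0)
    return probs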
if __name__ == "__main__":
X, y = sample_gmm_2d(6, 4, 30)
C = len(np.lib.arraysetops.unique(y))
#X = np.array([[1, 2], [2, 3], [4, 5]])
#y = np.array([0, 1, 1])[np.newaxis]
y_ = OneHotEncoder().fit_transform(y).toarray()
w_1, w_2, b_1, b_2 = fcann_train(X, y_, C)
probs, _ = forward(X, w_1, w_2, b_1, b_2)
Y = np.argmax(probs, axis=1)
y = y.flatten()
print eval_perf_binary(Y, y)
bbox = (np.min(X, axis=0), np.max(X, axis=0))
graph_surface(lambda x: np.argmax(forward(x, w_1, w_2, b_1, b_2)[0], axis=1), bbox, offset=0.5)
graph_data(X, y, Y) | mit |
tody411/InverseToon | inversetoon/geometry/ellipsoids.py | 1 | 5349 |
# -*- coding: utf-8 -*-
## @package inversetoon.geometry.ellipsoids
#
# Implementation of 2D ellipsoids.
# @author tody
# @date 2015/08/13
import math
import numpy as np
from numpy.linalg import eig, inv
## Ellipsoids
class Ellipsoids:
## Constructor
def __init__(self, points = []):
self._A = None
self._center = None
self._phi = None
self._dU = None
self._dV = None
self._axes = None
self._thetas = None
if len(points) > 0:
self.fit(points)
def fit(self, points):
self.fitParameters(points)
self.computeCenter()
self.computeRotation()
self.computeAxes()
self.computeThetas(points)
def fitParameters(self, points):
points = np.array(points)
x = points[:, 0]
y = points[:, 1]
D = np.array([x * x, x * y, y * y, x, y, np.ones_like(x)])
S = np.dot(D, D.T)
C = np.zeros((6, 6))
C[0, 2] = C[2, 0] = 2
C[1, 1] = -1
eigen_values, eigen_vectors = eig(np.dot(inv(S), C))
maxID = np.argmax(np.abs(eigen_values))
self._A = eigen_vectors[:, maxID]
def computeCenter(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
num = b * b - a * c
x0 = (c * d - b * f) / num
y0 = (a * f - b * d) / num
self._center = np.array([x0, y0])
def computeRotation(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
phi = 0.5 * np.arctan(2 * b / (a - c))
self._phi = phi
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
self._dU = dU
self._dV = dV
def computeAxes(self):
A = self._A
a, b, c, d, f, g = A[0], A[1] / 2, A[2], A[3] / 2, A[4] / 2, A[5]
up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
res1 = res2 = up
res1 /= down1
res2 /= down2
res1=np.max([0.00001, res1])
res1=np.sqrt(res1)
res2=np.max([0.00001, res2])
res2=np.sqrt(res2)
self._axes = [res1, res2]
def computeTheta(self, p):
a, b = self._axes
phi = self._phi
c = self._center
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
cp = p - c
u, v = [np.dot(cp, dU), np.dot(cp, dV)]
u /= a
v /= b
theta = np.arctan2(v, u)
return theta
def computeThetas(self, points):
self._thetas = [self.computeTheta(p) for p in points]
def pointAt(self, t):
a, b = self._axes
dU = self._dU
dV = self._dV
c = self._center
p = c + a * np.cos(t) * dU + b * np.sin(t) * dV
return p
def pointsAt(self, t):
a, b = self._axes
dU = self._dU
dV = self._dV
a_dU = a * dU
b_dV = b * dV
c = self._center
U = np.array([np.cos(t), np.cos(t)]).T
V = np.array([np.sin(t), np.sin(t)]).T
P = c + U * a_dU + V * b_dV
return P
def points(self):
return self.pointsAt(self._thetas)
def curvatureAt(self, t):
a, b = self._axes
u = np.cos(t)
v = np.sin(t)
up = a * b
down = b * b * u * u + a * a * v * v
down = math.pow(down, 1.5)
k = up / down
return k
def curvatures(self):
K = [self.curvatureAt(t) for t in self._thetas]
return K
def plotCenter(self, plt, color="g"):
plt.scatter(self._center[0], self._center[1], color=color)
def plotAxes(self, plt, color=[0.0, 0.2, 0.2]):
a, b = self._axes
dU = self._dU
dV = self._dV
a_dU = a * dU
b_dV = b * dV
c = self._center
a_axis = np.array([c - a_dU, c + a_dU])
b_axis = np.array([c - b_dV, c + b_dV])
plt.plot(a_axis[:,0], a_axis[:,1], "-", color=color)
plt.plot(b_axis[:,0], b_axis[:,1], "-", color=color)
def plotEllipsoids(self, plt, color="r"):
P = self.points()
plt.plot(P[:,0], P[:,1], "-", color=color)
def plotCurvatures(self, plt):
K = self.curvatures()
x = np.arange(len(K))
plt.plot(x, K, "-")
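# Illustrative check (not part of the original module): fits the model to a known,
# slightly noisy ellipse and verifies that the recovered semi-axes match the ones
# used to generate it. The axis lengths, rotation and noise level are assumptions.
def _check_known_ellipse_fit(a=4.0, b=2.0, phi=np.pi / 6):
    rng = np.random.RandomState(0)
    t = np.linspace(0.0, 2.0 * np.pi, 120, endpoint=False)
    dU = np.array([np.cos(phi), np.sin(phi)])
    dV = np.array([-np.sin(phi), np.cos(phi)])
    U = np.array([np.cos(t), np.cos(t)]).T
    V = np.array([np.sin(t), np.sin(t)]).T
    # slight noise keeps the scatter matrix invertible for the algebraic fit
    points = a * U * dU + b * V * dV + 0.01 * rng.rand(len(t), 2)
    el = Ellipsoids(points)
    assert np.allclose(sorted(el._axes), [b, a], rtol=0.05)
    return el._axes, el._phi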
if __name__ == '__main__':
import matplotlib.pyplot as plt
from inversetoon.plot.window import showMaximize
a, b = 4.0, 3.0
c = 0.2 * np.random.rand(2)
t_min, t_max = [0.2 * np.pi, 1.2 * np.pi]
t = np.linspace(t_min, t_max, 100)
phi = 0.3 * np.pi
dU = np.array([np.cos(phi), np.sin(phi)])
dV = np.array([-np.sin(phi), np.cos(phi)])
U = np.array([np.cos(t), np.cos(t)]).T
V = np.array([np.sin(t), np.sin(t)]).T
points = c + a * U * dU + b * V * dV
points[:, 0] += 0.1 * np.random.rand(len(t))
points[:, 1] += 0.1 * np.random.rand(len(t))
ax = plt.subplot(121)
ax.set_aspect('1.0')
ax.scatter(points[:, 0], points[:, 1])
el = Ellipsoids(points)
el.plotCenter(ax)
el.plotAxes(ax)
el.plotEllipsoids(ax)
ax2 = plt.subplot(122)
el.plotCurvatures(ax2)
showMaximize() | mit |
mjudsp/Tsallis | examples/linear_model/plot_iris_logistic.py | 119 | 1679 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_0.py | 33 | 2147 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from Yahoo via matplotlib.finance.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pa.DataFrame(data, index=dates)
start_date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2010, 1, 1)
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in mysym:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
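# Illustrative sketch (not part of the original script): the convolution above is
# an ordinary centered moving average; away from the edges it equals the plain
# mean over the window. The toy series below is an arbitrary assumption.
def _demo_moving_average(window=5):
    x = np.arange(20, dtype=float)
    smoothed = np.convolve(x, np.ones(window) / window, 'same')
    i = 10
    assert np.isclose(smoothed[i], x[i - window // 2:i + window // 2 + 1].mean())
    return smoothed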
#plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/grid_search.py | 32 | 36586 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
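# Illustrative sketch (not part of the scikit-learn API): integer indexing into
# ParameterGrid matches full iteration, which is what ParameterSampler relies on
# for memory-efficient sampling without replacement. The toy grid is an assumption.
def _demo_parameter_grid_indexing():
    grid = ParameterGrid([{'kernel': ['linear']},
                          {'kernel': ['rbf'], 'gamma': [1, 10]}])
    assert [grid[i] for i in range(len(grid))] == list(grid)
    return len(grid)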
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to a
        sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
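# Illustrative usage sketch (not part of the scikit-learn API): evaluates a single
# parameter setting on one train/test split. The dataset, estimator, parameter
# value and fold layout below are assumptions for demonstration only.
def _demo_fit_grid_point():
    from .cross_validation import KFold
    from .datasets import load_iris
    from .svm import SVC
    iris = load_iris()
    X, y = iris.data, iris.target
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    train, test = next(iter(KFold(len(y), n_folds=3, shuffle=True,
                                  random_state=0)))
    score, parameters, n_test = fit_grid_point(
        X, y, estimator, {'C': 1.0}, train, test, scorer, verbose=0)
    return score, parameters, n_test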
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
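# Illustrative usage sketch (added for exposition, not part of the original
# module). The estimator, dataset and distributions below are arbitrary
# assumptions; any estimator with ``fit``/``predict`` and any object exposing
# an ``rvs`` method (e.g. a frozen scipy.stats distribution) would work.
if __name__ == "__main__":
    from scipy.stats import expon
    from sklearn.datasets import make_classification
    from sklearn.svm import SVC

    X_demo, y_demo = make_classification(n_samples=200, random_state=0)
    # Continuous distribution for C (sampled with replacement), plain list
    # for the kernel (sampled uniformly).
    param_distributions = {"C": expon(scale=10), "kernel": ["linear", "rbf"]}
    search = RandomizedSearchCV(SVC(), param_distributions, n_iter=20,
                                random_state=0)
    search.fit(X_demo, y_demo)
    print(search.best_params_, search.best_score_)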
| bsd-3-clause |
herilalaina/scikit-learn | examples/plot_missing_values.py | 35 | 3059 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the estimator get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
Nilesh4145/Pseudoranger_trials | Models/lin_reg.py | 1 | 1567 | import csv
import numpy
from keras.layers import Dense, Input
from keras.models import Model, Sequential
import matplotlib.pyplot as plt
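# Overview (added comments): the script below fits a small Keras dense
# network as an approximately linear filter mapping one recorded coordinate
# onto a reference coordinate, applies it to a second track file, and plots
# the raw versus filtered orbit. The column indices (1, 2, 4, 5) are taken
# as-is from the original CSV layout and are assumed, not documented.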
reader = csv.reader(open("track1489492822.csv", "r"), delimiter = ",")
l = list(reader)
x_train = []
x_res = []
y_train = []
y_res = []
for i in l[1:]:
x_train.append(i[4])
x_res.append(i[1])
y_train.append(i[5])
y_res.append(i[2])
X_train = numpy.array(x_train).astype("float64")
X_res = numpy.array(x_res).astype("float64")
Y_train = numpy.array(y_train).astype("float64")
Y_res = numpy.array(y_res).astype("float64")
model1= Sequential()
model1.add(Dense(12, input_dim=1, init='uniform', activation='linear'))
model1.add(Dense(1, init='uniform', activation='linear'))
model1.compile(loss='cosine_proximity', optimizer='sgd')
model1.fit(X_train, X_res, nb_epoch=50, batch_size=1)
new_reader = csv.reader(open("track1489609280.csv", "r"), delimiter = "\t")
l1 = list(new_reader)
test_x = []
test_y = []
for i in l1[1:]:
test_x.append(i[4])
test_y.append(i[5])
Test_x = numpy.array(test_x).astype("float64")
Test_y = numpy.array(test_y).astype("float64")
pred_x = model1.predict(Test_x)
#model1.fit(Y_train, Y_res, nb_epoch=50, batch_size=1)
#pred_y = model2.predict(Test_y)
a_x = []
a_y = []
for i in range(len(pred_x)):
a_x.append(pred_x[i]+Test_x[i])
a_y.append(pred_x[i]+Test_y[i])
fig = plt.figure()
sub1 = fig.add_subplot(221)
sub1.set_title('Un-filtered Orbit plot')
sub1.plot(test_x)
sub2 = fig.add_subplot(223)
sub2.set_title('Filtered Orbit plot')
sub2.plot(a_x)
plt.tight_layout()
plt.show() | mit |
PedroTrujilloV/nest-simulator | pynest/examples/csa_topology_example.py | 7 | 2628 | # -*- coding: utf-8 -*-
#
# csa_topology_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This example shows a brute-force way of specifying connections
between NEST Topology layers using Connection Set Algebra.
We are working on better ways to do this.
"""
import nest
import nest.topology as topo
import matplotlib.pyplot as plt
try:
import csa
haveCSA = True
except ImportError:
haveCSA = False
def geometryFunction(topologyLayer):
"""
This factory returns a CSA-style geometry function for the given layer.
The function returned will return for each CSA-index the position in
space of the given neuron as a 2- or 3-element list.
Note: This function stores a copy of the neuron positions internally,
entailing memory overhead.
"""
positions = topo.GetPosition(nest.GetLeaves(topologyLayer)[0])
def geometry_function(idx):
"Return position of neuron with given CSA-index."
return positions[idx]
return geometry_function
def csa_topology_example():
# layers have 20x20 neurons and extent 1 x 1
pop1 = topo.CreateLayer({'elements': 'iaf_neuron',
'rows': 20, 'columns': 20})
pop2 = topo.CreateLayer({'elements': 'iaf_neuron',
'rows': 20, 'columns': 20})
# create CSA-style geometry functions and metric
g1 = geometryFunction(pop1)
g2 = geometryFunction(pop2)
d = csa.euclidMetric2d(g1, g2)
# Gaussian connectivity profile, sigma = 0.2, cutoff at 0.5
cs = csa.cset(csa.random * (csa.gaussian(0.2, 0.5) * d), 10000.0, 1.0)
# create connections
nest.CGConnect(pop1, pop2, cs, {"weight": 0, "delay": 1})
# show targets of center neuron
topo.PlotTargets(topo.FindCenterElement(pop1), pop2)
if __name__ == "__main__":
if haveCSA:
nest.ResetKernel()
csa_topology_example()
else:
print("This example requires CSA to be installed in order to run!")
| gpl-2.0 |
erjerison/adaptability | github_submission/qtl_detection_one_trait.py | 1 | 31004 | import qtl_detection_adaptability
import numpy
import matplotlib.pylab as pt
import regression
##Updated 11-2-2016 to include a second epistasis model, 'detect_qtls_with_epistasis2'
##Updated 12-21-2016 to calculate confidence intervals based on LOD drop-off during QTL detection
##Updated 1-18-2016 to include a function for finding QTLs on the two environments separately
def detect_qtls(genotype_mat, phenotype_list_sc, phenotype_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
phenotype_means_sc = numpy.dot(phenotype_list_sc, helper_matrix_sc)/pops_per_seg_sc
phenotype_second_moments_sc = numpy.dot(phenotype_list_sc**2, helper_matrix_sc)/pops_per_seg_sc
phenotypes_sc = numpy.append(phenotype_means_sc.reshape((n_segs,1)), phenotype_second_moments_sc.reshape((n_segs,1)), axis=1)
phenotypes_sc = numpy.append(phenotypes_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
phenotype_means_ypd = numpy.dot(phenotype_list_ypd, helper_matrix_ypd)/pops_per_seg_ypd
phenotype_second_moments_ypd = numpy.dot(phenotype_list_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
phenotypes_ypd = numpy.append(phenotype_means_ypd.reshape((n_segs,1)), phenotype_second_moments_ypd.reshape((n_segs,1)), axis=1)
phenotypes_ypd = numpy.append(phenotypes_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_ypd)
lods = lods_sc + lods_ypd
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
##Confidence intervals around this peak
intervals = []
relative_height_lb = top_lod
relative_height_ub = top_lod
lb_index = top_lod_idx
ub_index = top_lod_idx
consecutive_low_scores = 0
while consecutive_low_scores < 40:
lb_index -= 1
relative_height_lb = lods[lb_index]
if relative_height_lb < top_lod - 1.5:
consecutive_low_scores += 1
else:
consecutive_low_scores = 0
if consecutive_low_scores == 1:
first_consecutive_low_idx = lb_index
consecutive_low_scores = 0
while consecutive_low_scores < 40:
ub_index += 1
relative_height_ub = lods[ub_index]
if relative_height_ub < top_lod - 1.5:
consecutive_low_scores += 1
else:
consecutive_low_scores = 0
if consecutive_low_scores == 1:
first_consecutive_high_idx = ub_index
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
#print i
#print "Generating permutation..."
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = phenotypes_ypd[permutation,:]
permuted_phenotype_matrix_sc = phenotypes_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
#print permuted_lods
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
#print bootstrapped_lods
sig_threshold = numpy.sort(bootstrapped_lods)[::-1][49]
#sig_threshold = numpy.sort(bootstrapped_lods)[::-1][99]
# pt.plot(lods)
# pt.plot(permuted_lods,'g')
# pt.axhline(sig_threshold, 0, 1, 'k')
# pt.show()
print 'sig_threshold =', sig_threshold
#print chromosome_peak_lod_idxs
if top_lod > sig_threshold:
new_QTLs = [top_lod_idx]
intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
print intervals
current_QTLs = new_QTLs
all_QTLs_found = False
while all_QTLs_found ==False:
print current_QTLs
#Fit a linear model using the current QTL list--or a nonlinear model
qtl_matrix_sc = expanded_genotype_mat_sc[:,current_QTLs]
qtl_matrix_ypd = expanded_genotype_mat_ypd[:,current_QTLs]
beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,qtl_matrix_sc)
beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,qtl_matrix_ypd)
residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,qtl_matrix_sc.T))
residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,qtl_matrix_ypd.T))
residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
lods = lods_sc + lods_ypd
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
relative_height_lb = top_lod
relative_height_ub = top_lod
lb_index = top_lod_idx
ub_index = top_lod_idx
consecutive_low_scores = 0
while consecutive_low_scores < 20:
lb_index -= 1
relative_height_lb = lods[lb_index]
if relative_height_lb < top_lod - 2:
consecutive_low_scores += 1
else:
consecutive_low_scores = 0
if consecutive_low_scores == 1:
first_consecutive_low_idx = lb_index
consecutive_low_scores = 0
while consecutive_low_scores < 20:
ub_index += 1
relative_height_ub = lods[ub_index]
if relative_height_ub < top_lod - 2:
consecutive_low_scores += 1
else:
consecutive_low_scores = 0
if consecutive_low_scores == 1:
first_consecutive_high_idx = ub_index
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-50] #p < .05
#sig_threshold = numpy.sort(bootstrapped_lods)[-100] #p < .05
print 'sig_threshold =', sig_threshold
#pt.plot(lods)
#pt.plot(permuted_lods,'g')
#pt.axhline(sig_threshold, 0, 1, 'k')
#pt.show()
if top_lod > sig_threshold:
current_QTLs.append(top_lod_idx)
intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc, numpy.array(intervals)
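# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; this is NOT the calculate_lods
# used above, whose implementation lives in qtl_detection_adaptability).
# It shows the two ideas the scan above relies on: a per-locus LOD score and
# a permutation-derived genome-wide significance threshold. The simple
# regression-based approximation LOD = -(n/2) * log10(1 - r^2) is a standard
# textbook form, used here only as a stand-in.
def _sketch_lod_scan(genotypes, phenotype):
    # genotypes: (n_segs, n_loci) 0/1 matrix; phenotype: (n_segs,) vector
    n_segs = genotypes.shape[0]
    g = genotypes - genotypes.mean(axis=0)
    p = phenotype - phenotype.mean()
    denom = numpy.sqrt((g**2).sum(axis=0) * (p**2).sum())
    r = numpy.dot(p, g) / denom
    return -(n_segs / 2.0) * numpy.log10(1.0 - r**2)

def _sketch_permutation_threshold(genotypes, phenotype, n_iter=1000, alpha=0.05):
    # Null distribution of the genome-wide maximum LOD: shuffle phenotypes
    # across segregants while keeping the genotype matrix (and hence the
    # linkage structure) fixed, then take the (1 - alpha) quantile.
    maxima = numpy.array([
        numpy.nanmax(_sketch_lod_scan(genotypes,
                                      numpy.random.permutation(phenotype)))
        for _ in range(n_iter)])
    return numpy.sort(maxima)[int((1.0 - alpha) * n_iter)]
# ---------------------------------------------------------------------------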
def detect_qtls_one_envt(genotype_mat, phenotype_list, helper_matrix, pops_per_seg):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg)
phenotype_means = numpy.dot(phenotype_list, helper_matrix)/pops_per_seg
phenotype_second_moments = numpy.dot(phenotype_list**2, helper_matrix)/pops_per_seg
phenotypes = numpy.append(phenotype_means.reshape((n_segs,1)), phenotype_second_moments.reshape((n_segs,1)), axis=1)
phenotypes = numpy.append(phenotypes, pops_per_seg.reshape((n_segs,1)), axis = 1)
expanded_genotype_mat = numpy.dot(helper_matrix, genotype_mat)
lods = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes)
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
##Confidence intervals around this peak
intervals = []
relative_height_lb = top_lod
relative_height_ub = top_lod
lb_index = top_lod_idx
ub_index = top_lod_idx
consecutive_lowores = 0
while consecutive_lowores < 20:
lb_index -= 1
relative_height_lb = lods[lb_index]
if relative_height_lb < top_lod - 1.5:
consecutive_lowores += 1
else:
consecutive_lowores = 0
if consecutive_lowores == 1:
first_consecutive_low_idx = lb_index
consecutive_lowores = 0
while consecutive_lowores < 20:
ub_index += 1
relative_height_ub = lods[ub_index]
if relative_height_ub < top_lod - 1.5:
consecutive_lowores += 1
else:
consecutive_lowores = 0
if consecutive_lowores == 1:
first_consecutive_high_idx = ub_index
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
#print i
#print "Generating permutation..."
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix = phenotypes[permutation,:]
permuted_lods = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix)
#print permuted_lods
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
#print bootstrapped_lods
sig_threshold = numpy.sort(bootstrapped_lods)[::-1][49]
#sig_threshold = numpy.sort(bootstrapped_lods)[::-1][99]
# pt.plot(lods)
# pt.plot(permuted_lods,'g')
# pt.axhline(sig_threshold, 0, 1, 'k')
# pt.show()
print 'sig_threshold =', sig_threshold
#print chromosome_peak_lod_idxs
if top_lod > sig_threshold:
new_QTLs = [top_lod_idx]
intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
print intervals
current_QTLs = new_QTLs
all_QTLs_found = False
while all_QTLs_found ==False:
print current_QTLs
#Fit a linear model using the current QTL list--or a nonlinear model
qtl_matrix = expanded_genotype_mat[:,current_QTLs]
beta, betanorm, F = regression.ordinary_linear_regression(phenotype_list,qtl_matrix)
residuals = phenotype_list - F(numpy.dot(beta,qtl_matrix.T))
residuals_means = numpy.dot(residuals, helper_matrix)/pops_per_seg
residuals_second_moments = numpy.dot(residuals**2, helper_matrix)/pops_per_seg
phenotypes_new = numpy.append(residuals_means.reshape((n_segs,1)),residuals_second_moments.reshape((n_segs,1)), axis=1)
phenotypes_new = numpy.append(phenotypes_new, pops_per_seg.reshape((n_segs,1)), axis = 1)
lods = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new)
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
relative_height_lb = top_lod
relative_height_ub = top_lod
lb_index = top_lod_idx
ub_index = top_lod_idx
consecutive_lowores = 0
while consecutive_lowores < 20:
lb_index -= 1
relative_height_lb = lods[lb_index]
if relative_height_lb < top_lod - 1.5:
consecutive_lowores += 1
else:
consecutive_lowores = 0
if consecutive_lowores == 1:
first_consecutive_low_idx = lb_index
consecutive_lowores = 0
while consecutive_lowores < 20:
ub_index += 1
relative_height_ub = lods[ub_index]
if relative_height_ub < top_lod - 1.5:
consecutive_lowores += 1
else:
consecutive_lowores = 0
if consecutive_lowores == 1:
first_consecutive_high_idx = ub_index
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix = phenotypes_new[permutation,:]
permuted_lods = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix)
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-50] #p < .05
#sig_threshold = numpy.sort(bootstrapped_lods)[-100] #p < .05
print 'sig_threshold =', sig_threshold
#pt.plot(lods)
#pt.plot(permuted_lods,'g')
#pt.axhline(sig_threshold, 0, 1, 'k')
#pt.show()
if top_lod > sig_threshold:
current_QTLs.append(top_lod_idx)
intervals.append([first_consecutive_low_idx, first_consecutive_high_idx])
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_QTLs, beta*betanorm, numpy.array(intervals)
def calculate_qtl_confidence_intervals_lods(qtl_locs, genotype_mat, phenotype_sc, phenotype_ypd, helper_matrix_sc=numpy.identity(229), helper_matrix_ypd=numpy.identity(229), pops_per_seg_sc=numpy.ones((229,)), pops_per_seg_ypd=numpy.ones((229,))):
#This function takes an arbitrary number of phenotypes (columns of phenotype_mat) and assumes qtls have been detected on them jointly
#evol_env_vector records which environment populations with a given phenotype evolved, if applicable; 1=sc at 37 C, 0=ypd at 30 C.
#Confidence intervals are calculated based on the location at which the LOD score (log-likelihood) falls to half its maximum value.
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
#n_phenotypes = len(evol_env_vector)
n_loci = genotype_mat.shape[1]
lod_idxs = []
intervals = []
real_qtl_locs = []
##Set up phenotype matrixes
phenotype_means_sc = numpy.dot(phenotype_sc, helper_matrix_sc)/pops_per_seg_sc
phenotype_second_moments_sc = numpy.dot(phenotype_sc**2, helper_matrix_sc)/pops_per_seg_sc
phenotypes_sc = numpy.append(phenotype_means_sc.reshape((n_segs,1)), phenotype_second_moments_sc.reshape((n_segs,1)), axis=1)
phenotypes_sc = numpy.append(phenotypes_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
phenotype_means_ypd = numpy.dot(phenotype_ypd, helper_matrix_ypd)/pops_per_seg_ypd
phenotype_second_moments_ypd = numpy.dot(phenotype_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
phenotypes_ypd = numpy.append(phenotype_means_ypd.reshape((n_segs,1)), phenotype_second_moments_ypd.reshape((n_segs,1)), axis=1)
phenotypes_ypd = numpy.append(phenotypes_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
lods = numpy.zeros((n_loci,))
lods += qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_sc)
lods += qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_ypd)
for qtl in qtl_locs:
if qtl > 20:
real_peak = qtl - 20 + numpy.nanargmax(lods[qtl-20:qtl+20])
else:
real_peak = numpy.nanargmax(lods[0:qtl+20])
peak_height = lods[real_peak]
relative_height_lb = peak_height
relative_height_ub = peak_height
lb_index = real_peak
ub_index = real_peak
print real_peak
print peak_height
while relative_height_lb > .5*peak_height:
lb_index -= 1
relative_height_lb = lods[lb_index]
while relative_height_ub > .5*peak_height:
ub_index += 1
relative_height_ub = lods[ub_index]
intervals.append([lb_index, ub_index])
real_qtl_locs.append(real_peak)
return real_qtl_locs, numpy.array(intervals)
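# Illustrative sketch (added for exposition): the simplest form of the
# LOD-drop support interval used by the detection routines above, without the
# "consecutive low score" smoothing. It walks outward from the peak until the
# LOD score first falls at least `drop` below the peak. `lods` is a 1-D array
# over loci; the function name is an assumption for the sketch only.
def _sketch_lod_support_interval(lods, peak_idx, drop=1.5):
    peak = lods[peak_idx]
    lb = peak_idx
    while lb > 0 and lods[lb] > peak - drop:
        lb -= 1
    ub = peak_idx
    while ub < len(lods) - 1 and lods[ub] > peak - drop:
        ub += 1
    return lb, ub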
def detect_qtls_above_fitness(genotype_mat, phenotype_list_sc, phenotype_list_ypd, initfit_list_sc, initfit_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
n_pops_sc = sum(pops_per_seg_sc)
n_pops_ypd = sum(pops_per_seg_ypd)
expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
#symmetrize the genotype matrix
genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
#Initial dependent variables are initial fitnesses
X_sc = numpy.dot(helper_matrix_sc, initfit_list_sc).reshape((n_pops_sc,1))
X_ypd = numpy.dot(helper_matrix_ypd, initfit_list_ypd).reshape((n_pops_ypd,1))
current_QTLs = []
all_QTLs_found = False
while all_QTLs_found ==False:
print current_QTLs
#Fit a linear model using the current QTL list--or a nonlinear model
qtl_matrix_sc = numpy.append(X_sc, expanded_genotype_mat_sc[:,current_QTLs], axis = 1)
qtl_matrix_ypd = numpy.append(X_ypd, expanded_genotype_mat_ypd[:,current_QTLs], axis = 1)
beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,qtl_matrix_sc)
beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,qtl_matrix_ypd)
residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,qtl_matrix_sc.T))
residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,qtl_matrix_ypd.T))
residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
lods = lods_sc + lods_ypd
#pt.plot(lods)
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
#print top_lod
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-49]
print 'sig_threshold =', sig_threshold
#print numpy.sort(bootstrapped_lods)
#pt.plot(permuted_lods,'g')
#pt.axhline(sig_threshold,0,1,'k')
#pt.show()
if top_lod > sig_threshold:
current_QTLs.append(top_lod_idx)
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc
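# Illustrative sketch (added for exposition): the "regress out the current
# model, then scan the residuals" step shared by the detection routines
# above, reduced to plain least squares. Names are assumptions for the
# sketch only.
def _sketch_residual_scan_step(design_matrix, phenotypes):
    # design_matrix: (n_pops, n_terms) columns already in the model
    # (intercept, initial fitness, previously accepted QTLs, ...)
    beta, _, _, _ = numpy.linalg.lstsq(design_matrix, phenotypes)
    residuals = phenotypes - numpy.dot(design_matrix, beta)
    return beta, residuals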
def detect_qtls_with_epistasis(genotype_mat, phenotype_list_sc, phenotype_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
n_pops_sc = sum(pops_per_seg_sc)
n_pops_ypd = sum(pops_per_seg_ypd)
expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
#symmetrize the genotype matrix
genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
kre33_loc = 9596
kre_genotypes = genotype_mat[:,kre33_loc]
kre_genotypes_sc = expanded_genotype_mat_sc[:,kre33_loc]
kre_genotypes_ypd = expanded_genotype_mat_ypd[:,kre33_loc]
current_main_effect_QTLs = []#new_QTLs
current_epistatic_QTLs = []
all_QTLs_found = False
while all_QTLs_found ==False:
print current_main_effect_QTLs
print current_epistatic_QTLs
#Fit a linear model using the current QTL list--or a nonlinear model
coefficient_matrix_sc = kre_genotypes_sc.reshape((n_pops_sc,1))
if len(current_main_effect_QTLs) > .5:
coefficient_matrix_sc = numpy.append(coefficient_matrix_sc, expanded_genotype_mat_sc[:,current_main_effect_QTLs], axis=1)
if len(current_epistatic_QTLs) > .5:
coefficient_matrix_sc = numpy.append(coefficient_matrix_sc, kre_genotypes_sc.reshape((n_pops_sc,1))*expanded_genotype_mat_sc[:,current_epistatic_QTLs], axis=1)
coefficient_matrix_ypd = kre_genotypes_ypd.reshape((n_pops_ypd,1))
if len(current_main_effect_QTLs) > .5:
coefficient_matrix_ypd = numpy.append(coefficient_matrix_ypd, expanded_genotype_mat_ypd[:,current_main_effect_QTLs], axis=1)
if len(current_epistatic_QTLs) > .5:
coefficient_matrix_ypd = numpy.append(coefficient_matrix_ypd, kre_genotypes_ypd.reshape((n_pops_ypd,1))*expanded_genotype_mat_ypd[:,current_epistatic_QTLs], axis=1)
beta_sc, betanorm_sc, F_sc = regression.ordinary_linear_regression(phenotype_list_sc,coefficient_matrix_sc)
beta_ypd, betanorm_ypd, F_ypd = regression.ordinary_linear_regression(phenotype_list_ypd,coefficient_matrix_ypd)
residuals_sc = phenotype_list_sc - F_sc(numpy.dot(beta_sc,coefficient_matrix_sc.T))
residuals_ypd = phenotype_list_ypd - F_ypd(numpy.dot(beta_ypd,coefficient_matrix_ypd.T))
residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
phenotypes_new_sc = numpy.append(residuals_means_sc.reshape((n_segs,1)),residuals_second_moments_sc.reshape((n_segs,1)), axis=1)
phenotypes_new_sc = numpy.append(phenotypes_new_sc, pops_per_seg_sc.reshape((n_segs,1)), axis = 1)
residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
phenotypes_new_ypd = numpy.append(residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), axis=1)
phenotypes_new_ypd = numpy.append(phenotypes_new_ypd, pops_per_seg_ypd.reshape((n_segs,1)), axis = 1)
#print phenotypes_new_sc
##Calculate lods for new main-effect loci
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, phenotypes_new_ypd)
lods = lods_sc + lods_ypd
# pt.figure()
# pt.plot(lods)
# pt.show()
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
print top_lod
##Calculate potential epistatic effects of loci already in the model
if len(current_main_effect_QTLs) > .5:
genotype_mat_interactions = kre_genotypes.reshape((n_segs,1))*genotype_mat[:,current_main_effect_QTLs]
#print genotype_mat_interactions
lods_sc_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, phenotypes_new_sc)
lods_ypd_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, phenotypes_new_ypd)
lods_interactions = lods_sc_ints + lods_ypd_ints
top_lod_int = numpy.nanmax(lods_interactions)
top_lod_int_idx = current_main_effect_QTLs[numpy.nanargmax(lods_interactions)]
print top_lod_int
else:
top_lod_int = 0
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = phenotypes_new_ypd[permutation,:]
permuted_phenotype_matrix_sc = phenotypes_new_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
if len(current_main_effect_QTLs) > .5:
permuted_lods_sc_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, permuted_phenotype_matrix_sc)
permuted_lods_ypd_ints = qtl_detection_adaptability.calculate_lods(genotype_mat_interactions, permuted_phenotype_matrix_ypd)
permuted_lods_interactions = permuted_lods_sc_ints + permuted_lods_ypd_ints
all_permuted_lods = numpy.append(permuted_lods, permuted_lods_interactions)
else:
all_permuted_lods = permuted_lods
bootstrapped_lods.append(numpy.nanmax(all_permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-49]
print 'sig_threshold =', sig_threshold
if (top_lod > sig_threshold or top_lod_int > sig_threshold):
if top_lod > top_lod_int:
current_main_effect_QTLs.append(top_lod_idx)
elif top_lod_int > top_lod:
current_epistatic_QTLs.append(top_lod_int_idx)
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_main_effect_QTLs, current_epistatic_QTLs, beta_ypd*betanorm_ypd, beta_sc*betanorm_sc
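# Illustrative sketch (added for exposition): the design matrix that the two
# epistasis routines build, written out in one place. Columns are: intercept,
# (optionally) initial fitness, the KRE33 genotype, each candidate QTL, and
# each candidate QTL multiplied by the KRE33 genotype (the interaction term).
# Variable names are assumptions for the sketch only.
def _sketch_epistasis_design(kre_genotypes, qtl_genotypes, init_fitness=None):
    n = len(kre_genotypes)
    cols = [numpy.ones((n, 1)), kre_genotypes.reshape((n, 1))]
    if init_fitness is not None:
        cols.insert(1, init_fitness.reshape((n, 1)))
    if qtl_genotypes is not None and qtl_genotypes.size:
        cols.append(qtl_genotypes)
        cols.append(qtl_genotypes * kre_genotypes.reshape((n, 1)))
    return numpy.concatenate(cols, axis=1)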
def detect_qtls_with_epistasis2(genotype_mat, phenotype_list_sc, initfit_list_sc, phenotype_list_ypd, initfit_list_ypd, helper_matrix_sc, helper_matrix_ypd, pops_per_seg_sc, pops_per_seg_ypd):
#Initialize residuals as phenotypes; format is [<phenotype>,<phenotype**2>,pops_per_seg]
n_segs = len(pops_per_seg_sc)
n_pops_sc = sum(pops_per_seg_sc)
n_pops_ypd = sum(pops_per_seg_ypd)
expanded_genotype_mat_sc = numpy.dot(helper_matrix_sc, genotype_mat)
expanded_genotype_mat_ypd = numpy.dot(helper_matrix_ypd, genotype_mat)
#symmetrize the genotype matrix
genotype_mat = 1./2.*(genotype_mat - (1 - genotype_mat))
expanded_genotype_mat_sc = expanded_genotype_mat_sc - .5
expanded_genotype_mat_ypd = expanded_genotype_mat_ypd - .5
#expanded_genotype_mat_sc = 1./2.*(expanded_genotype_mat_sc - (1 - expanded_genotype_mat_sc))
#expanded_genotype_mat_ypd = 1./2.*(expanded_genotype_mat_ypd - (1 - expanded_genotype_mat_ypd))
kre33_loc = 9596
kre_genotypes = genotype_mat[:,kre33_loc]
kre_genotypes_sc = expanded_genotype_mat_sc[:,kre33_loc]
kre_genotypes_ypd = expanded_genotype_mat_ypd[:,kre33_loc]
current_QTLs = [] #new_QTLs
#At each step we are going to fit the model: delta_X = a + bX + c*kre_genotypes + sum_i=1^n_qtls d_i1*kre_genotypes*g_i + d_i2*(1-kre_genotypes)*g_i
#At the final step, we will fit the full model and determine if all the coefficients are significant.
##Initialize: fit delta_X = a + bX + c*kre_genotypes
X_sc = numpy.concatenate((numpy.dot(helper_matrix_sc, initfit_list_sc).reshape((n_pops_sc,1)), kre_genotypes_sc.reshape((n_pops_sc,1)), numpy.ones((n_pops_sc,1))), axis = 1)
X_ypd = numpy.concatenate((numpy.dot(helper_matrix_ypd, initfit_list_ypd).reshape((n_pops_ypd,1)), kre_genotypes_ypd.reshape((n_pops_ypd,1)),numpy.ones((n_pops_ypd,1))), axis = 1)
all_QTLs_found = False
while all_QTLs_found ==False:
#If this is not the first iteration, add the (potentially epistatic) qtls to the model
if len(current_QTLs) > .5:
qtl_mat_sc = expanded_genotype_mat_sc[:, current_QTLs]
qtl_mat_ypd = expanded_genotype_mat_ypd[:, current_QTLs]
X_sc_temp = numpy.concatenate((X_sc, qtl_mat_sc, qtl_mat_sc*kre_genotypes_sc.reshape((n_pops_sc,1))), axis=1)
X_ypd_temp = numpy.concatenate((X_ypd, qtl_mat_ypd, qtl_mat_ypd*kre_genotypes_ypd.reshape((n_pops_ypd,1))), axis=1)
#print X_sc_temp.shape
else:
X_sc_temp = X_sc
X_ypd_temp = X_ypd
#Calculate residuals:
beta_sc = numpy.dot(numpy.linalg.inv(numpy.dot(X_sc_temp.T, X_sc_temp)), numpy.dot(X_sc_temp.T, phenotype_list_sc))
residuals_sc = phenotype_list_sc - numpy.dot(X_sc_temp, beta_sc) #check dot product direction
beta_ypd = numpy.dot(numpy.linalg.inv(numpy.dot(X_ypd_temp.T, X_ypd_temp)), numpy.dot(X_ypd_temp.T, phenotype_list_ypd))
residuals_ypd = phenotype_list_ypd - numpy.dot(X_ypd_temp, beta_ypd)
residuals_means_sc = numpy.dot(residuals_sc, helper_matrix_sc)/pops_per_seg_sc
residuals_second_moments_sc = numpy.dot(residuals_sc**2, helper_matrix_sc)/pops_per_seg_sc
residual_mat_sc = numpy.concatenate((residuals_means_sc.reshape((n_segs,1)), residuals_second_moments_sc.reshape((n_segs,1)), pops_per_seg_sc.reshape((n_segs,1))), axis=1)
residuals_means_ypd = numpy.dot(residuals_ypd, helper_matrix_ypd)/pops_per_seg_ypd
residuals_second_moments_ypd = numpy.dot(residuals_ypd**2, helper_matrix_ypd)/pops_per_seg_ypd
residual_mat_ypd = numpy.concatenate((residuals_means_ypd.reshape((n_segs,1)), residuals_second_moments_ypd.reshape((n_segs,1)), pops_per_seg_ypd.reshape((n_segs,1))), axis=1)
##Calculate lods for new loci
lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, residual_mat_sc)
lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, residual_mat_ypd)
lods = lods_sc + lods_ypd
top_lod = numpy.nanmax(lods)
top_lod_idx = numpy.nanargmax(lods)
##Bootstrap over segregants
bootstrapped_lods = []
n_iter = 1000
for i in xrange(0,n_iter):
permutation = numpy.random.permutation(numpy.arange(n_segs))
permuted_phenotype_matrix_ypd = residual_mat_ypd[permutation,:]
permuted_phenotype_matrix_sc = residual_mat_sc[permutation,:]
permuted_lods_ypd = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_ypd)
permuted_lods_sc = qtl_detection_adaptability.calculate_lods(genotype_mat, permuted_phenotype_matrix_sc)
permuted_lods = permuted_lods_sc + permuted_lods_ypd
bootstrapped_lods.append(numpy.nanmax(permuted_lods))
sig_threshold = numpy.sort(bootstrapped_lods)[-49]
print 'sig_threshold =', sig_threshold
#print numpy.sort(bootstrapped_lods)
#pt.plot(permuted_lods,'g')
#pt.plot(lods,'b')
#pt.axhline(sig_threshold,0,1,'k')
#pt.show()
if top_lod > sig_threshold:
current_QTLs.append(top_lod_idx)
else:
print 'all_QTLs_found'
all_QTLs_found = True
return current_QTLs, beta_sc, beta_ypd | mit |
larsmans/scikit-learn | sklearn/manifold/locally_linear.py | 15 | 24841 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
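# Illustrative sketch (added for exposition, not part of the scikit-learn
# API): a quick numerical check of the property stated in the docstring
# above, namely that each row of the returned weight matrix sums to 1. The
# toy data and neighborhood choice are arbitrary assumptions.
def _barycenter_weights_demo():
    rng = np.random.RandomState(0)
    X_demo = rng.rand(5, 3)
    # use each point's next two points (cyclically) as its "neighborhood"
    Z_demo = np.array([X_demo[[(i + 1) % 5, (i + 2) % 5]] for i in range(5)])
    W = barycenter_weights(X_demo, Z_demo)
    assert np.allclose(W.sum(axis=1), 1.0)
    return W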
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # add I: M = (W - I).T (W - I) = W.T W - W.T - W + I
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_ : array-like, shape [n_samples, n_components]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
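# Illustrative usage sketch (added for exposition, not part of the original
# module). The dataset and parameter values are arbitrary assumptions; any
# array of shape (n_samples, n_features) is handled the same way.
if __name__ == "__main__":
    from sklearn.datasets import make_swiss_roll

    X_demo, _ = make_swiss_roll(n_samples=500, random_state=0)
    lle = LocallyLinearEmbedding(n_neighbors=12, n_components=2,
                                 method='standard', random_state=0)
    X_embedded = lle.fit_transform(X_demo)
    print(X_embedded.shape, lle.reconstruction_error_)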
| bsd-3-clause |
rseubert/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |