repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
potash/scikit-learn | examples/manifold/plot_compare_methods.py | 31 | 4051 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
massmutual/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
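# Example (illustrative): norm(np.array([3., 4.])) is 5.0; the BLAS ``nrm2`` routine
# is used rather than np.sqrt(np.dot(x, x)) because it is more precise.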
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
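# Example (illustrative): squared_norm(np.array([3., 4.])) is 25.0, computed as a
# single dot product of the ravelled array with itself.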
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
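# Example (illustrative): row_norms(np.array([[3., 4.], [0., 12.]])) gives
# array([5., 12.]) and, with squared=True, array([25., 144.]); the einsum path
# avoids materialising the X * X temporary.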
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
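# Example (illustrative): fast_logdet(2 * np.eye(3)) is log(8) ~= 2.079, while
# fast_logdet(np.array([[0., 1.], [1., 0.]])) is -inf because the determinant of
# that matrix is negative.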
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
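# Example (illustrative): density(np.array([0., 1., 2., 0.])) is 0.5 (two non-zero
# entries out of four); for a sparse matrix the ratio is nnz / (n_rows * n_cols).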
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
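# Example (illustrative): for two scipy.sparse matrices the product a * b stays
# sparse, so the result is only densified when dense_output=True (via .toarray());
# two dense ndarrays simply fall through to fast_dot.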
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
An (A.shape[0] x size) matrix with orthonormal columns, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
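# Usage sketch: for A of shape (n_samples, n_features) and size=k, Q has shape
# (n_samples, k) with (approximately) orthonormal columns, i.e. np.dot(Q.T, Q) is
# close to the identity; extra power iterations mainly help when the spectrum of A
# decays slowly or the data is noisy.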
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
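# Usage sketch: with k = n_components, ``U, s, V = randomized_svd(M, k)`` returns
# U of shape (n_samples, k), s of shape (k,) and V of shape (k, n_features), so
# np.dot(U * s, V) is a rank-k approximation of M.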
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller (in absolute value) than cond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
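# Note (illustrative): the same sign vector multiplies the columns of u and the rows
# of v, so the product np.dot(u * s, v) is unchanged; only the sign convention of
# each singular vector pair is fixed, which makes e.g. PCA components reproducible.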
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
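# Example (illustrative): log_logistic(np.array([0., 100., -100.])) is approximately
# [-0.693, 0., -100.]: log(0.5) at zero, ~0 for large positive inputs and ~x for
# large negative inputs, with neither branch overflowing.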
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
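# Example (illustrative): softmax(np.array([[1., 1.], [0., 1000.]])) returns rows
# summing to one, approximately [[0.5, 0.5], [0., 1.]]; subtracting the row maximum
# first avoids evaluating np.exp(1000.), which would overflow.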
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
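# Example (illustrative): make_nonnegative(np.array([-1., 2.])) shifts every entry
# by the same constant and returns array([0., 3.]); a sparse input with a negative
# minimum raises ValueError instead of being densified.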
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
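# Worked example (sketch): if a first chunk [[1.], [3.]] gave old_mean=[2.],
# old_variance=[1.] and old_sample_count=2, then calling the update with the next
# chunk X = np.array([[5.], [7.]]) returns (array([4.]), array([5.]), 4), matching
# np.mean and np.var over the concatenated data [1, 3, 5, 7].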
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
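# Example (illustrative): _deterministic_vector_sign_flip(np.array([[1., -2.],
# [3., 1.]])) returns array([[-1., 2.], [3., 1.]]): the first row is flipped because
# its largest-magnitude entry (-2.) is negative, the second row is left unchanged.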
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/preprocessing/tests/test_label.py | 34 | 18227 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert_true(issparse(got))
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
assert_raises(ValueError, LabelBinarizer().fit_transform, y_seq_of_seqs)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
le.fit(["apple", "orange"])
msg = "bad input shape"
assert_raise_message(ValueError, msg, le.transform, "apple")
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, 1, -1])
assert_raises(ValueError, le.inverse_transform, [-1])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
orbitfold/tardis | tardis/tests/integration_tests/plot_helpers.py | 1 | 3893 | import os
import tempfile
from pytest_html import extras
from tardis import __githash__ as tardis_githash
class BasePlotSaver(object):
def __init__(self, request, dokuwiki_url=None, assets_dirpath=None):
"""Base Class for RemotePlotSaver and LocalPlotSaver classes.
These help in uploading plots to a DokuWiki instance or saving them
locally in a directory.
Parameters
----------
request : _pytest.python.RequestObject
dokuwiki_url : str
assets_dirpath : str
"""
self.request = request
self._plots = list()
self.plot_html = list()
self.dokuwiki_url = dokuwiki_url
self.assets_dirpath = assets_dirpath
def add(self, plot, name):
"""Accept a plot figure and add it to ``self._plots``.
Parameters
----------
plot : matplotlib.pyplot.figure
name : str
"""
self._plots.append((plot, name))
def save(self, plot, filepath, report):
"""Mark pass / fail and save a plot with ``name`` to ``filepath``.
Parameters
----------
plot : matplotlib.pyplot.figure
filepath : str
report : _pytest.runner.TestReport
"""
axes = plot.axes[0]
if report.passed:
axes.text(0.8, 0.8, 'passed', transform=axes.transAxes,
bbox={'facecolor': 'green', 'alpha': 0.5, 'pad': 10})
else:
axes.text(0.8, 0.8, 'failed', transform=axes.transAxes,
bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10})
plot.savefig(filepath)
def get_extras(self):
"""Return ``self.plot_html`` which is further added into html report.
Returns
-------
list
List of strings containing raw html snippet to embed images.
"""
return self.plot_html
thumbnail_html_remote = """
<div class="image" style="float: left">
<a href="#">
<img src= "{dokuwiki_url}lib/exe/fetch.php?media=reports:{githash}:{name}.png" />
</a>
</div>
"""
class RemotePlotSaver(BasePlotSaver):
def __init__(self, request, dokuwiki_url):
super(RemotePlotSaver, self).__init__(request, dokuwiki_url=dokuwiki_url)
def upload(self, report):
"""Upload content of ``self._plots`` to ``self.dokuwiki_url``.
Parameters
----------
report : _pytest.runner.TestReport
"""
for plot, name in self._plots:
plot_file = tempfile.NamedTemporaryFile(suffix=".png")
self.save(plot, plot_file.name, report)
self.request.config.dokureport.doku_conn.medias.add(
"reports:{0}:{1}.png".format(tardis_githash[:7], name),
plot_file.name
)
self.plot_html.append(extras.html(
thumbnail_html_remote.format(
dokuwiki_url=self.dokuwiki_url,
githash=tardis_githash[:7],
name=name)
)
)
plot_file.close()
thumbnail_html_local = """
<div class="image" style="float: left">
<a href="#">
<img src= "assets/{name}.png" />
</a>
</div>
"""
class LocalPlotSaver(BasePlotSaver):
def __init__(self, request, assets_dirpath):
super(LocalPlotSaver, self).__init__(request, assets_dirpath=assets_dirpath)
def upload(self, report):
"""Save content of ``self._plots`` to ``self.assets_dirpath``.
Parameters
----------
report : _pytest.runner.TestReport
"""
for plot, name in self._plots:
self.save(plot, os.path.join(
self.assets_dirpath, "{0}.png".format(name)), report
)
self.plot_html.append(extras.html(
thumbnail_html_local.format(name=name))
)
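# Usage sketch (hypothetical wiring, names are illustrative only): an integration
# test would typically create LocalPlotSaver(request, assets_dirpath="report/assets"),
# call add(figure, "some_plot") while running, and have a reporting hook call
# upload(report) once the outcome is known, so each saved PNG is stamped
# 'passed' or 'failed' by BasePlotSaver.save before being embedded via get_extras().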
| bsd-3-clause |
leonardbj/AIMS | src/Regression/regression.py | 1 | 6698 | """ Base regression model: wraps training/testing data in pandas DataFrames,
centers and normalizes the targets, and provides accessors and plotting helpers.
Author: Leonard Berrada
Date: 4 Nov 2015
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn as sns
sns.set(color_codes=True)
except:
pass
class RegressionModel:
def __init__(self,
data_dict=None,
**kwargs):
assert data_dict.has_key('ytrain')
self._training_df = pd.DataFrame()
self.n_training = len(data_dict['ytrain'])
self._training_df['y'] = data_dict['ytrain']
if data_dict.has_key('xtrain'):
self._training_df['x'] = data_dict['xtrain']
else:
self._training_df['x'] = np.arange(self.n_training)
if data_dict.has_key('ytruthtrain'):
self._training_df['ytruth'] = data_dict['ytruthtrain']
if data_dict.has_key('ytest'):
self._testing_df = pd.DataFrame()
self.n_testing = len(data_dict['ytest'])
self._testing_df['y'] = data_dict['ytest']
if data_dict.has_key('xtest'):
self._testing_df['x'] = data_dict['xtest']
else:
self._testing_df['x'] = self.n_training + \
np.arange(self.n_testing)
if data_dict.has_key('ytruthtest'):
self._testing_df['ytruth'] = data_dict['ytruthtest']
else:
self.n_testing = 0
print("done")
print("-" * 50)
print("showing headers for verification...")
print("Training Data :")
print(self._training_df.head())
if self.n_testing:
print("Testing Data :")
print(self._testing_df.head())
self.center_normalize()
print("NB: data has been centered and normalized")
def center_normalize(self):
self.y_mean = np.mean(self.Y_training())
self.y_std = np.std(self.Y_training())
self._training_df['y'] = (self.Y_training() - self.y_mean) / self.y_std
if self.n_testing:
self._testing_df['y'] = (
self.Y_testing() - self.y_mean) / self.y_std
if hasattr(self._training_df, "ytruth"):
self._training_df['ytruth'] = (
self.Y_truth_training() - self.y_mean) / self.y_std
self._testing_df['ytruth'] = (
self.Y_truth_testing() - self.y_mean) / self.y_std
def X_training(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._training_df.x.values[indices]
else:
return self._training_df.x.values[start:stop]
def X_testing(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._testing_df.x.values[indices]
else:
return self._testing_df.x.values[start:stop]
def Y_training(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._training_df.y.values[indices]
else:
return self._training_df.y.values[start:stop]
def Y_testing(self,
indices=None,
start=None,
stop=None):
if not self.n_testing:
return []
if hasattr(indices, "__len__"):
return self._testing_df.y.values[indices]
else:
return self._testing_df.y.values[start:stop]
def Y_pred(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._pred_df.ypred.values[indices]
else:
return self._pred_df.ypred.values[start:stop]
def Y_error(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._pred_df.yerr.values[indices]
else:
return self._pred_df.yerr.values[start:stop]
def Y_truth_training(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._training_df.ytruth.values[indices]
else:
return self._training_df.ytruth.values[start:stop]
def Y_truth_testing(self,
indices=None,
start=None,
stop=None):
if hasattr(indices, "__len__"):
return self._testing_df.ytruth.values[indices]
else:
return self._testing_df.ytruth.values[start:stop]
def embed_data(self):
n = self.n_training - self.p
self._emb_matrix = np.zeros((n, self.p))
for k in range(self.p):
self._emb_matrix[:, k] = self.Y_training(start=self.p - 1 - k,
stop=self.p - 1 - k + n)
def fit(self):
raise NotImplementedError("method should be overwritten")
def predict(self, testing_data):
raise NotImplementedError("method should be overwritten")
def get(self, attr_name):
return getattr(self, attr_name)
def plot_attr(self, attr_name, show=False, **kwargs):
attr_to_plot = getattr(self, attr_name)
plt.plot(attr_to_plot, **kwargs)
if show:
plt.show()
def plot_var(self, var_name, set_="", lag=None, show=False, **kwargs):
var_to_plot = getattr(self, var_name)
plt.plot(var_to_plot(start=lag), **kwargs)
if show:
plt.show()
def display(self, out=""):
plt.plot(self.X_training(stop=-self.p),
self.Y_training(start=self.p),
c='k',
ms=4)
try:
plt.plot(self.X_testing() - self.p,
self.Y_testing(),
c='b',
ms=4)
except:
pass
plt.plot(self.Y_pred(),
c='r',
ms=4)
plt.plot(self.Y_error(),
c='g',
alpha=0.5,
ms=4)
if out:
try:
plt.savefig(out,
transparent=False,
dpi=200,
bbox_inches='tight')
except:
print(
"could not save plot in %s, please make sure directory exists" % out)
plt.show()
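# Illustrative subclass (hypothetical, names made up): concrete models override
# ``fit`` and ``predict``, typically choosing an embedding order ``p`` and calling
# ``embed_data`` so that row i of ``_emb_matrix`` holds the p normalized
# observations preceding observation p + i:
#
#     class ARModel(RegressionModel):
#         def fit(self):
#             self.p = 3
#             self.embed_data()
#             targets = self.Y_training(start=self.p)
#             self.coeffs = np.linalg.lstsq(self._emb_matrix, targets)[0]
#         def predict(self, testing_data):
#             return np.dot(testing_data, self.coeffs)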
| mit |
stuartarchibald/numba | numba/tests/doc_examples/test_examples.py | 5 | 7262 | # Contents in this file are referenced from the sphinx-generated docs.
# "magictoken" is used for markers as beginning and ending of example text.
import sys
import unittest
from numba.tests.support import captured_stdout
class MatplotlibBlocker:
'''Blocks the import of matplotlib, so that doc examples that attempt to
plot the output don't result in plots popping up and blocking testing.'''
def find_spec(self, fullname, path, target=None):
if fullname == 'matplotlib':
msg = 'Blocked import of matplotlib for test suite run'
raise ImportError(msg)
class DocsExamplesTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._mpl_blocker = MatplotlibBlocker()
def setUp(self):
sys.meta_path.insert(0, self._mpl_blocker)
def tearDown(self):
sys.meta_path.remove(self._mpl_blocker)
def test_mandelbrot(self):
with captured_stdout():
# magictoken.ex_mandelbrot.begin
from timeit import default_timer as timer
try:
from matplotlib.pylab import imshow, show
have_mpl = True
except ImportError:
have_mpl = False
import numpy as np
from numba import jit
@jit(nopython=True)
def mandel(x, y, max_iters):
"""
Given the real and imaginary parts of a complex number,
determine if it is a candidate for membership in the Mandelbrot
set given a fixed number of iterations.
"""
i = 0
c = complex(x,y)
z = 0.0j
for i in range(max_iters):
z = z * z + c
if (z.real * z.real + z.imag * z.imag) >= 4:
return i
return 255
@jit(nopython=True)
def create_fractal(min_x, max_x, min_y, max_y, image, iters):
height = image.shape[0]
width = image.shape[1]
pixel_size_x = (max_x - min_x) / width
pixel_size_y = (max_y - min_y) / height
for x in range(width):
real = min_x + x * pixel_size_x
for y in range(height):
imag = min_y + y * pixel_size_y
color = mandel(real, imag, iters)
image[y, x] = color
return image
image = np.zeros((500 * 2, 750 * 2), dtype=np.uint8)
s = timer()
create_fractal(-2.0, 1.0, -1.0, 1.0, image, 20)
e = timer()
print(e - s)
if have_mpl:
imshow(image)
show()
# magictoken.ex_mandelbrot.end
def test_moving_average(self):
with captured_stdout():
# magictoken.ex_moving_average.begin
import numpy as np
from numba import guvectorize
@guvectorize(['void(float64[:], intp[:], float64[:])'],
'(n),()->(n)')
def move_mean(a, window_arr, out):
window_width = window_arr[0]
asum = 0.0
count = 0
for i in range(window_width):
asum += a[i]
count += 1
out[i] = asum / count
for i in range(window_width, len(a)):
asum += a[i] - a[i - window_width]
out[i] = asum / count
arr = np.arange(20, dtype=np.float64).reshape(2, 10)
print(arr)
print(move_mean(arr, 3))
# magictoken.ex_moving_average.end
def test_nogil(self):
with captured_stdout():
# magictoken.ex_no_gil.begin
import math
import threading
from timeit import repeat
import numpy as np
from numba import jit
nthreads = 4
size = 10**6
def func_np(a, b):
"""
Control function using Numpy.
"""
return np.exp(2.1 * a + 3.2 * b)
@jit('void(double[:], double[:], double[:])', nopython=True,
nogil=True)
def inner_func_nb(result, a, b):
"""
Function under test.
"""
for i in range(len(result)):
result[i] = math.exp(2.1 * a[i] + 3.2 * b[i])
def timefunc(correct, s, func, *args, **kwargs):
"""
Benchmark *func* and print out its runtime.
"""
print(s.ljust(20), end=" ")
# Make sure the function is compiled before the benchmark is
# started
res = func(*args, **kwargs)
if correct is not None:
assert np.allclose(res, correct), (res, correct)
# time it
print('{:>5.0f} ms'.format(min(repeat(
lambda: func(*args, **kwargs), number=5, repeat=2)) * 1000))
return res
def make_singlethread(inner_func):
"""
Run the given function inside a single thread.
"""
def func(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
inner_func(result, *args)
return result
return func
def make_multithread(inner_func, numthreads):
"""
Run the given function inside *numthreads* threads, splitting
its arguments into equal-sized chunks.
"""
def func_mt(*args):
length = len(args[0])
result = np.empty(length, dtype=np.float64)
args = (result,) + args
chunklen = (length + numthreads - 1) // numthreads
# Create argument tuples for each input chunk
chunks = [[arg[i * chunklen:(i + 1) * chunklen] for arg in
args] for i in range(numthreads)]
# Spawn one thread per chunk
threads = [threading.Thread(target=inner_func, args=chunk)
for chunk in chunks]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return result
return func_mt
func_nb = make_singlethread(inner_func_nb)
func_nb_mt = make_multithread(inner_func_nb, nthreads)
a = np.random.rand(size)
b = np.random.rand(size)
correct = timefunc(None, "numpy (1 thread)", func_np, a, b)
timefunc(correct, "numba (1 thread)", func_nb, a, b)
timefunc(correct, "numba (%d threads)" % nthreads, func_nb_mt, a, b)
# magictoken.ex_no_gil.end
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
shikhardb/scikit-learn | sklearn/tests/test_multiclass.py | 9 | 24243 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import fit_ovr
from sklearn.multiclass import fit_ovo
from sklearn.multiclass import fit_ecoc
from sklearn.multiclass import predict_ovr
from sklearn.multiclass import predict_ovo
from sklearn.multiclass import predict_ecoc
from sklearn.multiclass import predict_proba_ovr
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
with ignore_warnings():
assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()],
LabelBinarizer(), [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
@ignore_warnings
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
# y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = assert_warns(DeprecationWarning,
OneVsRestClassifier(base_clf).fit,
X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision_function-only estimator: predict_proba is expected to fail.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
decision_only.fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision_function-only estimator: predict_proba is expected to fail.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
return_indicator=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
prediction_from_list = ovo.fit(iris.data,
list(iris.target)).predict(iris.data)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
# For each sample and each class, there are only 3 possible vote levels
# because there are only 3 distinct class pairs and thus 3 distinct
# binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
# test that ties can not only be won by the first two labels
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
@ignore_warnings
def test_deprecated():
base_estimator = DecisionTreeClassifier(random_state=0)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
all_metas = [
(OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr),
(OneVsOneClassifier, fit_ovo, predict_ovo, None),
(OutputCodeClassifier, fit_ecoc, predict_ecoc, None),
]
for MetaEst, fit_func, predict_func, proba_func in all_metas:
try:
meta_est = MetaEst(base_estimator,
random_state=0).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train,
random_state=0)
except TypeError:
meta_est = MetaEst(base_estimator).fit(X_train, Y_train)
fitted_return = fit_func(base_estimator, X_train, Y_train)
if len(fitted_return) == 2:
estimators_, classes_or_lb = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
X_test),
meta_est.predict(X_test))
if proba_func is not None:
assert_almost_equal(proba_func(estimators_, X_test,
is_multilabel=False),
meta_est.predict_proba(X_test))
else:
estimators_, classes_or_lb, codebook = fitted_return
assert_almost_equal(predict_func(estimators_, classes_or_lb,
codebook, X_test),
meta_est.predict(X_test))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
ebilionis/pysmc | examples/simple_model.py | 2 | 3474 | """
A simple mixture model to test the capabilities of SMC.
Author:
Ilias Bilionis
Date:
9/22/2013
"""
import pymc
import numpy as np
import math
def make_model():
# The gamma parameter
gamma = 1.
@pymc.stochastic(dtype=float)
def mixture(value=1., gamma=gamma, pi=[0.2, 0.8], mu=[-2., 3.],
sigma=[0.01, 0.01]):
"""
The log probability of a mixture of normal densities.
:param value: The point of evaluation.
:type value : float
:param gamma: The parameter characterizing the SMC one-parameter
family.
:type gamma : float
:param pi : The weights of the components.
:type pi : 1D :class:`numpy.ndarray`
:param mu : The mean of each component.
:type mu : 1D :class:`numpy.ndarray`
:param sigma: The standard deviation of each component.
:type sigma : 1D :class:`numpy.ndarray`
"""
# Make sure everything is a numpy array
pi = np.array(pi)
mu = np.array(mu)
sigma = np.array(sigma)
# The number of components in the mixture
n = pi.shape[0]
# pymc.normal_like requires the precision not the variance:
tau = 1. / sigma ** 2
# The following looks a little bit awkward because of the need for
# numerical stability:
p = np.log(pi)
p += np.array([pymc.normal_like(value, mu[i], tau[i])
for i in range(n)])
p = math.fsum(np.exp(p))
# p should never be negative, but it can be zero (and log(0) is -inf)...
if p <= 0.:
return -np.inf
return gamma * math.log(p)
return locals()
def eval_stochastic_variable(var, values):
"""
Evaluate the probability of ``var`` at each point in ``values``.
:param var : The stochastic variable whose probability should be
evaluated.
:type var : :class:`pymc.Stochastic`
:param values: The points of evaluation.
:type values : list or :class:`numpy.ndarray`
:returns : The probabilities of the variable
at all ``values``.
:rtype : 1D :class:`numpy.ndarray`
"""
n = len(values)
y = np.zeros(n)
for i in range(n):
var.value = values[i]
y[i] = math.exp(var.logp)
return y
if __name__ == '__main__':
import matplotlib.pyplot as plt
x = np.linspace(-2, 3, 200)
# Plot the original probability density
m = make_model()
y = eval_stochastic_variable(m['mixture'], x)
plt.plot(x, y, linewidth=2)
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$p(x)$', fontsize=16)
plt.savefig('../doc/source/images/simple_model_pdf.png')
# Plot some members of the one-parameter SMC family of probability densities
plt.clf()
gammas = [1., 0.7, 0.5, 0.1, 0.05, 0.01]
for gamma in gammas:
# m['mixture'].parents['gamma'] = gamma
m['gamma'] = gamma
y = eval_stochastic_variable(m['mixture'], x)
plt.plot(x, y, linewidth=2)
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$\pi_\gamma(x)$', fontsize=16)
legend_labels = ['$\gamma = %1.2f$' % gamma for gamma in gammas]
plt.legend(legend_labels, loc='upper left')
plt.show()
# plt.savefig('../doc/source/images/simple_model_pdf_family.png')
| lgpl-3.0 |
mmessick/Tax-Calculator | taxcalc/calculate.py | 1 | 11930 | import pandas as pd
from pandas import DataFrame
import math
import numpy as np
from .utils import *
from .functions import *
from .parameters import Parameters
from .records import Records
import copy
all_cols = set()
def add_df(alldfs, df):
for col in df.columns:
if col not in all_cols:
all_cols.add(col)
alldfs.append(df[col])
else:
dup_index = [i for i,
series in enumerate(alldfs) if series.name == col][0]
alldfs[dup_index] = df[col]
def calculator(params, records, mods="", **kwargs):
update_mods = {}
if mods:
if isinstance(mods, str):
import json
dd = json.loads(mods)
dd = {int(k): (np.array(v) if type(v) == list else v)
for k, v in dd.items()}
update_mods.update(dd)
else:
update_mods.update(mods)
final_mods = toolz.merge_with(toolz.merge, update_mods,
{params.current_year: kwargs})
params.implement_reform(final_mods)
if final_mods:
max_yr = max(yr for yr in final_mods)
else:
max_yr = 0
if (params.current_year < max_yr):
msg = ("Modifications are for year {0} and Parameters are for"
" year {1}. Parameters will be advanced to year {0}")
print(msg.format(max_yr, params.current_year))
while params.current_year < max_yr:
params.set_year(params.current_year + 1)
if (records.current_year < max_yr):
msg = ("Modifications are for year {0} and Records are for"
" year {1}. Records will be advanced to year {0}")
print(msg.format(max_yr, records.current_year))
while records.current_year < max_yr:
records.increment_year()
calc = Calculator(params, records)
return calc
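# Minimal usage sketch for calculator() -- the reform key "_II_rt7" and the
# year 2016 are illustrative assumptions, not values checked against this
# code base:
#   calc = calculator(params, records, mods='{"2016": {"_II_rt7": [0.45]}}')
#   calc.calc_all()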
class Calculator(object):
def __init__(self, params=None, records=None, sync_years=True, **kwargs):
if isinstance(params, Parameters):
self._params = params
else:
msg = 'Must supply tax parameters as a Parameters object'
raise ValueError(msg)
if isinstance(records, Records):
self._records = records
elif isinstance(records, str):
self._records = Records.from_file(records, **kwargs)
else:
msg = 'Must supply tax records as a file path or Records object'
raise ValueError(msg)
if sync_years and self._records.current_year == 2008:
print("You loaded data for " +
str(self._records.current_year) + '.')
while self._records.current_year < self._params.current_year:
self._records.increment_year()
print("Your data have beeen extrapolated to " +
str(self._records.current_year) + ".")
assert self._params.current_year == self._records.current_year
@property
def params(self):
return self._params
@property
def records(self):
return self._records
def calc_all(self):
FilingStatus(self.params, self.records)
Adj(self.params, self.records)
CapGains(self.params, self.records)
SSBenefits(self.params, self.records)
AGI(self.params, self.records)
ItemDed(self.params, self.records)
EI_FICA(self.params, self.records)
AMED(self.params, self.records)
StdDed(self.params, self.records)
XYZD(self.params, self.records)
NonGain(self.params, self.records)
TaxGains(self.params, self.records)
MUI(self.params, self.records)
AMTI(self.params, self.records)
F2441(self.params, self.records)
DepCareBen(self.params, self.records)
ExpEarnedInc(self.params, self.records)
RateRed(self.params, self.records)
NumDep(self.params, self.records)
ChildTaxCredit(self.params, self.records)
AmOppCr(self.params, self.records)
LLC(self.params, self.records)
RefAmOpp(self.params, self.records)
NonEdCr(self.params, self.records)
AddCTC(self.params, self.records)
F5405(self.params, self.records)
C1040(self.params, self.records)
DEITC(self.params, self.records)
OSPC_TAX(self.params, self.records)
ExpandIncome(self.params, self.records)
def calc_all_test(self):
all_dfs = []
add_df(all_dfs, FilingStatus(self.params, self.records))
add_df(all_dfs, Adj(self.params, self.records))
add_df(all_dfs, CapGains(self.params, self.records))
add_df(all_dfs, SSBenefits(self.params, self.records))
add_df(all_dfs, AGI(self.params, self.records))
add_df(all_dfs, ItemDed(self.params, self.records))
add_df(all_dfs, EI_FICA(self.params, self.records))
add_df(all_dfs, AMED(self.params, self.records))
add_df(all_dfs, StdDed(self.params, self.records))
add_df(all_dfs, XYZD(self.params, self.records))
add_df(all_dfs, NonGain(self.params, self.records))
add_df(all_dfs, TaxGains(self.params, self.records))
add_df(all_dfs, MUI(self.params, self.records))
add_df(all_dfs, AMTI(self.params, self.records))
add_df(all_dfs, F2441(self.params, self.records))
add_df(all_dfs, DepCareBen(self.params, self.records))
add_df(all_dfs, ExpEarnedInc(self.params, self.records))
add_df(all_dfs, RateRed(self.params, self.records))
add_df(all_dfs, NumDep(self.params, self.records))
add_df(all_dfs, ChildTaxCredit(self.params, self.records))
add_df(all_dfs, AmOppCr(self.params, self.records))
add_df(all_dfs, LLC(self.params, self.records))
add_df(all_dfs, RefAmOpp(self.params, self.records))
add_df(all_dfs, NonEdCr(self.params, self.records))
add_df(all_dfs, AddCTC(self.params, self.records))
add_df(all_dfs, F5405(self.params, self.records))
add_df(all_dfs, C1040(self.params, self.records))
add_df(all_dfs, DEITC(self.params, self.records))
add_df(all_dfs, OSPC_TAX(self.params, self.records))
add_df(all_dfs, ExpandIncome(self.params, self.records))
totaldf = pd.concat(all_dfs, axis=1)
return totaldf
def increment_year(self):
self.records.increment_year()
self.params.set_year(self.params.current_year + 1)
@property
def current_year(self):
return self.params.current_year
def mtr(self, income_type_string, diff=100):
"""
This method calculates the marginal tax rate for every record.
In order to avoid kinks, we find the marginal rates associated with
both a tax increase and a tax decrease and use the more modest of
the two.
"""
income_type = getattr(self, income_type_string)
# Calculate the base level of taxes.
self.calc_all()
taxes_base = np.copy(self._ospctax)
# Calculate the tax change with a marginal increase in income.
setattr(self, income_type_string, income_type + diff)
self.calc_all()
delta_taxes_up = self._ospctax - taxes_base
# Calculate the tax change with a marginal decrease in income.
setattr(self, income_type_string, income_type - diff)
self.calc_all()
delta_taxes_down = taxes_base - self._ospctax
# Reset the income_type to its starting point to avoid
# unintended consequences.
setattr(self, income_type_string, income_type)
self.calc_all()
# Choose the more modest effect of either adding or subtracting income
delta_taxes = np.where(np.absolute(delta_taxes_up) <=
np.absolute(delta_taxes_down),
delta_taxes_up, delta_taxes_down)
# Calculate the marginal tax rate
mtr = delta_taxes / diff
return mtr
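# Usage sketch for mtr() -- "e00200" (wages) is an assumed Records column
# name used only for illustration:
#   calc.calc_all()
#   mtr_wages = calc.mtr('e00200')
#   print(mtr_wages.mean())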
def diagnostic_table(self, num_years=5):
table = []
row_years = []
calc = copy.deepcopy(self)
for i in range(0, num_years):
calc.calc_all()
row_years.append(calc.params._current_year)
# total number of records
returns = calc.records.s006.sum()
# AGI
agi = (calc.records.c00100 * calc.records.s006).sum()
# number of itemizers
ID1 = calc.records.c04470 * calc.records.s006
STD1 = calc.records._standard * calc.records.s006
deduction = np.maximum(calc.records.c04470, calc.records._standard)
# STD1 = (calc.c04100 + calc.c04200) * calc.s006
NumItemizer1 = (calc.records.s006[(calc.records.c04470 > 0) *
(calc.records.c00100 > 0)].sum())
# itemized deduction
ID = ID1[calc.records.c04470 > 0].sum()
NumSTD = calc.records.s006[(calc.records._standard > 0) *
(calc.records.c00100 > 0)].sum()
# standard deduction
STD = STD1[(calc.records._standard > 0) *
(calc.records.c00100 > 0)].sum()
# personal exemption
PE = (calc.records.c04600 *
calc.records.s006)[calc.records.c00100 > 0].sum()
# taxable income
taxinc = (calc.records.c04800 * calc.records.s006).sum()
# regular tax
regular_tax = (calc.records.c05200 * calc.records.s006).sum()
# AMT income
AMTI = (calc.records.c62100 * calc.records.s006).sum()
# total AMTs
AMT = (calc.records.c09600 * calc.records.s006).sum()
# number of people paying AMT
NumAMT1 = calc.records.s006[calc.records.c09600 > 0].sum()
# tax before credits
tax_bf_credits = (calc.records.c05800 * calc.records.s006).sum()
# tax before nonrefundable credits 09200
tax_bf_nonrefundable = (calc.records.c09200 *
calc.records.s006).sum()
# refundable credits
refundable = (calc.records._refund * calc.records.s006).sum()
# nonrefundable credits
nonrefundable = (calc.records.c07100 * calc.records.s006).sum()
# ospc_tax
revenue1 = (calc.records._ospctax * calc.records.s006).sum()
table.append([returns / math.pow(10, 6), agi / math.pow(10, 9),
NumItemizer1 / math.pow(10, 6), ID / math.pow(10, 9),
NumSTD / math.pow(10, 6), STD / math.pow(10, 9),
PE / math.pow(10, 9), taxinc / math.pow(10, 9),
regular_tax / math.pow(10, 9),
AMTI / math.pow(10, 9), AMT / math.pow(10, 9),
NumAMT1 / math.pow(10, 6),
tax_bf_credits / math.pow(10, 9),
refundable / math.pow(10, 9),
nonrefundable / math.pow(10, 9),
revenue1 / math.pow(10, 9)])
calc.increment_year()
df = DataFrame(table, row_years,
["Returns (#m)", "AGI ($b)", "Itemizers (#m)",
"Itemized Deduction ($b)",
"Standard Deduction Filers (#m)",
"Standard Deduction ($b)", "Personal Exemption ($b)",
"Taxable income ($b)", "Regular Tax ($b)",
"AMT income ($b)", "AMT amount ($b)",
"AMT number (#m)", "Tax before credits ($b)",
"refundable credits ($b)",
"nonrefundable credits ($b)",
"ospctax ($b)"])
df = df.transpose()
pd.options.display.float_format = '{:8,.1f}'.format
return df
| mit |
chintak/face_detection | helper.py | 1 | 3766 | import matplotlib as mpl
mpl.use('Agg')
import numpy as np
import lasagne
from nolearn.lasagne import NeuralNet
import cPickle as pickle
from models import *
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
class StepVariableUpdate(object):
def __init__(self, name, changes={}):
self.name = name
assert type(changes) == dict, (
"Expected: dictionary, "
"with keys denoting the epoch and values denoting the changed value."
)
self.changes = changes
self.epochs = sorted(changes.keys())
def __call__(self, nn, train_history):
epoch = train_history[-1]['epoch']
if epoch in self.epochs:
new_value = np.float32(self.changes[epoch])
print "Update: learning rate from %f to %f" % (getattr(nn, self.name).get_value(), new_value)
getattr(nn, self.name).set_value(new_value)
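# Usage sketch with a nolearn NeuralNet -- the shared-variable name
# 'update_learning_rate' and the epoch/value pairs are assumptions for
# illustration only:
#   net = NeuralNet(
#       ...,
#       update_learning_rate=theano.shared(np.float32(0.03)),
#       on_epoch_finished=[
#           StepVariableUpdate('update_learning_rate',
#                              changes={30: 0.003, 60: 0.0003}),
#       ],
#   )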
class AdjustVariable(object):
def __init__(self, name, start=0.01, stop=0.0001):
self.name = name
self.start, self.stop = start, stop
self.ls = None
def __call__(self, nn, train_history):
if self.ls is None:
self.ls = np.linspace(self.start, self.stop, 100)
epoch = train_history[-1]['epoch']
new_value = np.float32(self.ls[epoch - 1])
getattr(nn, self.name).set_value(new_value)
def load_network(fname, config="nnet_4c3d_1233_convs_layer", batch_iterator="BatchIterator"):
nnet = globals()[config](batch_iterator)
net_pkl = pickle.load(open(fname, 'rb'))
nnet.load_params_from(net_pkl)
return nnet
def save_model_params(net, history, folder, debug=True):
if not debug:
net.save_params_to(os.path.join(
folder, 'model_%d.pkl' % len(history)))
def plot_weight_matrix(Z, outname, save=True):
num = Z.shape[0]
fig = plt.figure(1, (80, 80))
fig.subplots_adjust(left=0.05, right=0.95)
grid = AxesGrid(fig, (1, 4, 2), # similar to subplot(142)
nrows_ncols=(int(np.ceil(num / 10.)), 10),
axes_pad=0.04,
share_all=True,
label_mode="L",
)
for i in range(num):
im = grid[i].imshow(Z[i, :, :, :].mean(
axis=0), cmap='gray')
for i in range(grid.ngrids):
grid[i].axis('off')
for cax in grid.cbar_axes:
cax.toggle_label(False)
if save:
fig.savefig(outname, bbox_inches='tight')
fig.clear()
def plot_weight_matrix_grid(net, history, folder, debug=True):
"""
A grid of 2x2 images with a single colorbar
"""
if debug:
return
params = net.get_all_params_values()
convs = [k for k in params.keys() if 'conv' in k]
outdir = os.path.join(folder, 'outputs', 'epoch_%d' % (len(history)))
if not os.path.exists(outdir):
os.makedirs(outdir)
with Parallel(n_jobs=3) as parallel:
parallel(delayed(plot_weight_matrix)(params[k][0],
os.path.join(outdir, 'weights_%s.png' % k))
for k in convs)
def plot_learning_curve(_, history, folder, debug=True):
arr = np.asarray(
map(lambda k: [k['epoch'], k['train_loss'], k['valid_loss']], history))
plt.figure()
plt.plot(arr[:, 0], arr[:, 1], 'r', marker='o',
label='Training loss', linewidth=2.0)
plt.plot(arr[:, 0], arr[:, 2], 'b', marker='o',
label='Validation loss', linewidth=2.0)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.ylim([0.0, np.max(arr[:, 1:]) * 1.3])
plt.title('Learning curve')
plt.legend()
if not debug:
plt.savefig('%s/learning_curve.png' % folder, bbox_inches='tight')
plt.close()
| apache-2.0 |
winklerand/pandas | pandas/core/sparse/list.py | 6 | 4148 | import warnings
import numpy as np
from pandas.core.base import PandasObject
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_scalar
from pandas.core.sparse.array import SparseArray
from pandas.util._validators import validate_bool_kwarg
import pandas._libs.sparse as splib
class SparseList(PandasObject):
"""
Data structure for accumulating data to be converted into a
SparseArray. Has similar API to the standard Python list
Parameters
----------
data : scalar or array-like
fill_value : scalar, default NaN
"""
def __init__(self, data=None, fill_value=np.nan):
# see gh-13784
warnings.warn("SparseList is deprecated and will be removed "
"in a future version", FutureWarning, stacklevel=2)
self.fill_value = fill_value
self._chunks = []
if data is not None:
self.append(data)
def __unicode__(self):
contents = '\n'.join(repr(c) for c in self._chunks)
return '{self}\n{contents}'.format(self=object.__repr__(self),
contents=pprint_thing(contents))
def __len__(self):
return sum(len(c) for c in self._chunks)
def __getitem__(self, i):
if i < 0:
if i + len(self) < 0: # pragma: no cover
raise ValueError('{index} out of range'.format(index=i))
i += len(self)
passed = 0
j = 0
while i >= passed + len(self._chunks[j]):
passed += len(self._chunks[j])
j += 1
return self._chunks[j][i - passed]
def __setitem__(self, i, value):
raise NotImplementedError
@property
def nchunks(self):
return len(self._chunks)
@property
def is_consolidated(self):
return self.nchunks == 1
def consolidate(self, inplace=True):
"""
Internally consolidate chunks of data
Parameters
----------
inplace : boolean, default True
Modify the calling object instead of constructing a new one
Returns
-------
splist : SparseList
If inplace=False, new object, otherwise reference to existing
object
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not inplace:
result = self.copy()
else:
result = self
if result.is_consolidated:
return result
result._consolidate_inplace()
return result
def _consolidate_inplace(self):
new_values = np.concatenate([c.sp_values for c in self._chunks])
new_index = _concat_sparse_indexes([c.sp_index for c in self._chunks])
new_arr = SparseArray(new_values, sparse_index=new_index,
fill_value=self.fill_value)
self._chunks = [new_arr]
def copy(self):
"""
Return copy of the list
Returns
-------
new_list : SparseList
"""
new_splist = SparseList(fill_value=self.fill_value)
new_splist._chunks = list(self._chunks)
return new_splist
def to_array(self):
"""
Return SparseArray from data stored in the SparseList
Returns
-------
sparr : SparseArray
"""
self.consolidate(inplace=True)
return self._chunks[0]
def append(self, value):
"""
Append element or array-like chunk of data to the SparseList
Parameters
----------
value: scalar or array-like
"""
if is_scalar(value):
value = [value]
sparr = SparseArray(value, fill_value=self.fill_value)
self._chunks.append(sparr)
self._consolidated = False
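# Usage sketch (SparseList is deprecated; this only illustrates the API
# defined above):
#   splist = SparseList(fill_value=0)
#   splist.append([1, 0, 0, 2])
#   splist.append(3)
#   sparr = splist.to_array()  # consolidated SparseArray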
def _concat_sparse_indexes(indexes):
all_indices = []
total_length = 0
for index in indexes:
# increment by offset
inds = index.to_int_index().indices + total_length
all_indices.append(inds)
total_length += index.length
return splib.IntIndex(total_length, np.concatenate(all_indices))
| bsd-3-clause |
sirca/clusterous | demo/ipython-lite/ipython/profile/ipython_kernel_config.py | 2 | 15101 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = u''
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = u''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
#
# c.IPKernelApp.transport = 'tcp'
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The IPython profile to use.
# c.IPKernelApp.profile = u'default'
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = u''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
# Whether to use appnope for compatiblity with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._execute_sleep = 0.0005
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#
# c.ZMQInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'Linux'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.separate_out2 = ''
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 2.7.9 |Anaconda 2.2.0 (64-bit)| (default, Mar 9 2015, 16:20:48) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.0.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.ZMQInteractiveShell.logstart = False
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Start logging to the given file in append mode.
# c.ZMQInteractiveShell.logappend = ''
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.quiet = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'username'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for extra authentication.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| apache-2.0 |
DD1984/skydrop_stm32 | skydrop/utils/serial_chart/chart_3D.py | 5 | 2332 | import serial
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def add_line(name, x, index):
item = {}
item["name"] = name
item["data"] = np.zeros(len(x))
item["index"] = index
item["axis"] = False
return item
time = np.arange(2)
y = []
y.append(add_line("x", time, 3))
y.append(add_line("y", time, 4))
y.append(add_line("z", time, 5))
index = 1
s = serial.Serial("/dev/ttyUSB0", 921600)
fig = plt.figure(1, figsize=(15,13))
ax = fig.add_subplot(111, projection='3d')
axis_x, = ax.plot(time, y[0]["data"], y[1]["data"], "r")
axis_y, = ax.plot(time, y[0]["data"], y[1]["data"], "g")
axis_z, = ax.plot(time, y[0]["data"], y[1]["data"], "b")
single, = ax.plot(time, y[0]["data"], y[1]["data"], "k-o", lw=5, zs = y[2]["data"])
ax.set_autoscale_on(True)
leg = ["x", "y", "z", "sum"]
plt.legend(leg)
plt.ion()
plt.show()
v_min = 100000
v_max = -100000
skip = 0
while True:
line = s.readline()
data = line.split(";")
# print data
for line in y:
val = 0
try:
tmp = data[line["index"]]
val = float(tmp)
except:
print "Err"
line["data"][index] = val
if val > v_max:
v_max = val
if val < v_min:
v_min = val
'''draw'''
if skip % 20 == 0:
single.set_xdata(y[0]["data"])
single.set_ydata(y[1]["data"])
single.set_3d_properties(zs = y[2]["data"])
x_data = [0, y[0]["data"][1]]
y_data = [0, 0]
axis_x.set_xdata(x_data)
axis_x.set_ydata(y_data)
axis_x.set_3d_properties(zs = [0,0])
x_data = [y[0]["data"][1], y[0]["data"][1]]
y_data = [0, y[1]["data"][1]]
axis_y.set_xdata(x_data)
axis_y.set_ydata(y_data)
axis_y.set_3d_properties(zs = [0,0])
x_data = [y[0]["data"][1], y[0]["data"][1]]
y_data = [y[1]["data"][1], y[1]["data"][1]]
axis_z.set_xdata(x_data)
axis_z.set_ydata(y_data)
axis_z.set_3d_properties(zs = y[2]["data"])
ax.set_ylim([-v_max, v_max])
ax.set_xlim([-v_max, v_max])
ax.set_zlim([-v_max, v_max])
ax.figure.canvas.draw()
skip += 1
# index = 1
| gpl-2.0 |
frank-tancf/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
CleverInsight/sparx | setup.py | 1 | 1324 | import os
from setuptools import setup, find_packages
from sparx import __version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
requirements = [
"pandas",
"numpy",
"scipy",
"scikit-learn"
]
setup(
name = "sparx",
version = "0.0.1",
description = "Sparx is an advanced and simplified data munging, wrangling and preparation library",
long_description = read('README.rst'),
url = 'http://cleverinsight.co',
license = 'BSD',
author = 'Bastin Robins J',
author_email = '[email protected]',
packages = find_packages(exclude=['tests']),
include_package_data = True,
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: Other/Proprietary License',
'Operating System :: OS Independent',
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"
],
install_requires = requirements,
tests_require = [],
)
| bsd-2-clause |
lbdreyer/iris | lib/iris/tests/test_mapping.py | 3 | 7620 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Tests map creation.
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import numpy.testing as np_testing
import cartopy.crs as ccrs
import iris
import iris.coord_systems
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
# A specific cartopy Globe matching the iris RotatedGeogCS default.
_DEFAULT_GLOBE = ccrs.Globe(
semimajor_axis=6371229.0, semiminor_axis=6371229.0, ellipse=None
)
@tests.skip_plot
@tests.skip_data
class TestBasic(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.cube = iris.tests.stock.realistic_4d()
def test_contourf(self):
cube = self.cube[0, 0]
iplt.contourf(cube)
self.check_graphic()
def test_pcolor(self):
cube = self.cube[0, 0]
iplt.pcolor(cube)
self.check_graphic()
def test_unmappable(self):
cube = self.cube[0, 0]
cube.coord("grid_longitude").standard_name = None
iplt.contourf(cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(
iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5, globe=_DEFAULT_GLOBE),
)
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(3.59579163e02, 3.59669159e02, -1.28250003e-01, -3.82499993e-02),
decimal=3,
)
@tests.skip_data
@tests.skip_plot
class TestUnmappable(tests.GraphicsTest):
def setUp(self):
super().setUp()
src_cube = iris.tests.stock.global_pp()
# Make a cube that can't be located on the globe.
cube = iris.cube.Cube(src_cube.data)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(96, dtype=np.float32) * 100, long_name="x", units="m"
),
1,
)
cube.add_dim_coord(
iris.coords.DimCoord(
np.arange(73, dtype=np.float32) * 100, long_name="y", units="m"
),
0,
)
cube.standard_name = "air_temperature"
cube.units = "K"
self.cube = cube
def test_simple(self):
iplt.contourf(self.cube, coords=["y", "x"])
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestMappingSubRegion(tests.GraphicsTest):
def setUp(self):
super().setUp()
cube_path = tests.get_data_path(
("PP", "aPProt1", "rotatedMHtimecube.pp")
)
cube = iris.load_cube(cube_path)[0]
# make the data smaller to speed things up.
self.cube = cube[::10, ::10]
def test_simple(self):
# First sub-plot
plt.subplot(221)
plt.title("Default")
iplt.contourf(self.cube)
plt.gca().coastlines("110m")
# Second sub-plot
plt.subplot(222, projection=ccrs.Mollweide(central_longitude=120))
plt.title("Molleweide")
iplt.contourf(self.cube)
plt.gca().coastlines("110m")
# Third sub-plot (the projection part is redundant, but a useful
# test none-the-less)
ax = plt.subplot(223, projection=iplt.default_projection(self.cube))
plt.title("Native")
iplt.contour(self.cube)
ax.coastlines("110m")
# Fourth sub-plot
ax = plt.subplot(2, 2, 4, projection=ccrs.PlateCarree())
plt.title("PlateCarree")
iplt.contourf(self.cube)
ax.coastlines("110m")
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(
iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5, globe=_DEFAULT_GLOBE),
)
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(313.01998901, 391.11999512, -22.48999977, 24.80999947),
)
@tests.skip_data
@tests.skip_plot
class TestLowLevel(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.cube = iris.tests.stock.global_pp()
self.few = 4
self.few_levels = list(range(280, 300, 5))
self.many_levels = np.linspace(
self.cube.data.min(), self.cube.data.max(), 40
)
def test_simple(self):
iplt.contour(self.cube)
self.check_graphic()
def test_params(self):
iplt.contourf(self.cube, self.few)
self.check_graphic()
iplt.contourf(self.cube, self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, self.many_levels)
self.check_graphic()
def test_keywords(self):
iplt.contourf(self.cube, levels=self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, levels=self.many_levels, alpha=0.5)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestBoundedCube(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.cube = iris.tests.stock.global_pp()
# Add some bounds to this data (this will actually make the bounds
# invalid as they will straddle the north pole and overlap on the
# dateline, but that doesn't matter for this test.)
self.cube.coord("latitude").guess_bounds()
self.cube.coord("longitude").guess_bounds()
def test_pcolormesh(self):
# pcolormesh can only be drawn in native coordinates (or more
# specifically, in coordinates that don't wrap).
plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.outline(self.cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(
iplt.default_projection(self.cube), ccrs.PlateCarree()
)
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
[0.0, 360.0, -89.99995422, 89.99998474],
)
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(
self.cube, mode=iris.coords.BOUND_MODE
),
[-1.875046, 358.124954, -90, 90],
)
@tests.skip_data
@tests.skip_plot
class TestLimitedAreaCube(tests.GraphicsTest):
def setUp(self):
super().setUp()
cube_path = tests.get_data_path(("PP", "aPProt1", "rotated.pp"))
self.cube = iris.load_cube(cube_path)[::20, ::20]
self.cube.coord("grid_latitude").guess_bounds()
self.cube.coord("grid_longitude").guess_bounds()
def test_pcolormesh(self):
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.pcolormesh(self.cube, facecolors="none", edgecolors="blue")
# the result is a graphic which has coloured edges. This is a mpl bug,
# see https://github.com/matplotlib/matplotlib/issues/1302
self.check_graphic()
def test_outline(self):
iplt.outline(self.cube)
self.check_graphic()
def test_scatter(self):
iplt.points(self.cube)
plt.gca().coastlines("110m")
self.check_graphic()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
dinos66/CommunityImageRetrieval | mainStaticPersonTask.py | 1 | 4634 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name:
# Purpose:      This .py file is the main Framework file
#
# Required libs: python-dateutil, numpy,matplotlib,pyparsing
# Author: konkonst
#
# Created: 20/08/2013
# Copyright: (c) ITI (CERTH) 2013
# Licence: <apache licence 2.0>
#-------------------------------------------------------------------------------
import time,os,pickle,glob,shutil
from staticCommPersonTask import communitystatic
print('staticCommPersonCentered')
print(time.asctime( time.localtime(time.time()) ))
'''PARAMETERS'''
#Construct the data class from scratch: 1 = yes / 2 = reuse the community detection / any other value = perform only the ranking
dataextract = 1
#Community detection method. 'Ahn','Demon' and 'Copra' for overlapping and 'Louvain' for non-overlapping. Ahn carries a threshold.
commDetectMethod = ['Demon', 0.66]
#User sets desired number of displayed top images
topImages = 8
#User sets desired number of most frequent people to retrieve
topPeople = 200
#Provide people set or leave empty to retrieve images for the number of topPeople as set above
peopleSet = ['justin_timberlake','oprah_winfrey','lady_gaga','justin_bieber','michael_schumacher','miley_cyrus','jk_rowling','zinedine_zidane','barack_obama','prince_william','brad_pitt_actor','leonardo_dicaprio','natalie_portman']
peopleSet.sort()
peopleSet = [] #Keep this line to use the rankedPeople.txt pool of users; remove it to use the explicit peopleSet above
#Delete all previous folders containing results? (Does not apply to the html files)
delFolders = 0
filename = glob.glob("./data/txt/*.txt")
filename = [x for x in filename if x[11:].startswith('noDups')]
for idx,files in enumerate(filename):
print(str(idx+1) + '.' + files[11:-4])
selection = int(input('Select a dataset from the above: '))-1
dataset_path_results = "./data/GETTY_"+filename[selection][24:-4]+"/staticPersonCentered_"+commDetectMethod[0]+"/results/"
dataset_path_tmp = "./data/GETTY_"+filename[selection][24:-4]+"/staticPersonCentered_"+commDetectMethod[0]+"/tmp/"
if not os.path.exists(dataset_path_results+"rankedPeople.txt"):
print('You need to run the personPopularity.py first. Look into that...')
exit()
'''Functions'''
t = time.time()
if dataextract==1:#Start from scratch
data = communitystatic.from_txt(filename[selection],dataset_path_results,dataset_path_tmp)
dataPck = open(dataset_path_tmp + "allPersondata.pck", "wb")
pickle.dump(data, dataPck , protocol = 2)
dataPck.close()
del(data)
elapsed = time.time() - t
print('Stage 1: %.2f seconds' % elapsed)
if dataextract==1 or dataextract==2:#If the basic data (authors, mentions, time) has been created
data = pickle.load(open(dataset_path_tmp + "allPersondata.pck", "rb"))
captiondict = data.captiondict
print('static Community detection method selected is :'+commDetectMethod[0])
dataStatic=data.extraction(commDetectMethod)
del(data)
elapsed = time.time() - t
print('\nStage 2: %.2f seconds' % elapsed)
decisionforAll = input('\nRetrieve the topImages by screening them one by one???(y or n) ')
if dataextract ==1 or dataextract ==2 or dataextract ==3:#Only ranking beyond this point
data = pickle.load(open(dataset_path_tmp + "allPersondata.pck", "rb"))
captiondict = data.captiondict
del(data)
dataStatic = pickle.load(open(dataset_path_tmp + 'comm_'+commDetectMethod[0]+'.pck','rb'))
#delete folders if you're starting from scratch
if delFolders == 1:
result_files = glob.glob(dataset_path_results+'/analysis/*.txt')
if result_files:
for file in result_files:
os.remove(file)
if not peopleSet:
with open(dataset_path_results+'rankedPeople.txt','r') as f:
for lineId,line in enumerate(f):
if lineId>topPeople-1:
break
line = line.split('\t')
peopleSet.append(line[0])
for person in peopleSet:
if decisionforAll != str('n') and not os.path.exists(dataset_path_results+'html/'+person):
os.makedirs(dataset_path_results+'html/'+person)
if decisionforAll != str('n'):
personDecision = input('\nRetrieve images for '+person+'???(y or n) ')
if decisionforAll == str('n'):
print("\nRetrieval Commences for "+person)
if decisionforAll == str('n') or personDecision == str('y'):
dataStatic.photoRetrieval(topImages, person, captiondict,decisionforAll)
dataStatic.popularity_coappearence(topImages, person, captiondict)
elapsed = time.time() - t
print('\nStage 3: %.2f seconds' % elapsed)
| apache-2.0 |
trhongbinwang/data_science_journey | deep_learning/tensorflow/tutorials/tutorial2/04_logistic_regression.py | 1 | 5332 | """Simple tutorial using code from the TensorFlow example for Regression.
Parag K. Mital, Jan. 2016"""
# pip3 install --upgrade
# https://storage.googleapis.com/tensorflow/mac/tensorflow-0.6.0-py3-none-any.whl
# %%
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import numpy as np
import matplotlib.pyplot as plt
# model dimensions (MNIST): 784 = 28*28 input pixels, 10 output classes
n_input = 784
n_output = 10
def load_data():
# %%
# get the classic mnist dataset
# one-hot means a sparse vector for every observation where only
# the class label is 1, and every other class is 0.
# more info here:
# https://www.tensorflow.org/versions/0.6.0/tutorials/mnist/download/index.html#dataset-object
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# %%
# mnist is now a DataSet with accessors for:
# 'train', 'test', and 'validation'.
# within each, we can access:
# images, labels, and num_examples
print(mnist.train.num_examples,
mnist.test.num_examples,
mnist.validation.num_examples)
# %% the images are stored as:
# n_observations x n_features tensor (n-dim array)
# the labels are stored as n_observations x n_labels,
# where each observation is a one-hot vector.
print(mnist.train.images.shape, mnist.train.labels.shape)
# %% the range of the values of the images is from 0-1
print(np.min(mnist.train.images), np.max(mnist.train.images))
# %% we can visualize any one of the images by reshaping it to a 28x28 image
plt.imshow(np.reshape(mnist.train.images[100, :], (28, 28)), cmap='gray')
return mnist
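# Added note (not in the original tutorial): with the standard TF MNIST split,
# the shapes printed above correspond to roughly 55000 train / 5000 validation /
# 10000 test examples, each image flattened to a 784-vector with values in [0, 1].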
def inputs_placeholder():
# %% We can create a container for an input image using tensorflow's graph:
# We allow the first dimension to be None, since this will eventually
# represent our mini-batches, or how many images we feed into a network
# at a time during training/validation/testing.
# The second dimension is the number of features that the image has.
net_input = tf.placeholder(tf.float32, [None, n_input])
# %% We'll create a placeholder for the true output of the network
y_true = tf.placeholder(tf.float32, [None, 10])
return [net_input, y_true]
def model(net_input, y_true):
# %% We can write a simple regression (y = W*x + b) as:
W = tf.Variable(tf.zeros([n_input, n_output]))
b = tf.Variable(tf.zeros([n_output]))
net_output = tf.nn.softmax(tf.matmul(net_input, W) + b)
# %% And then write our loss function:
cross_entropy = -tf.reduce_sum(y_true * tf.log(net_output))
# %% This would equate each label in our one-hot vector between the
# prediction and actual using the argmax as the predicted label
correct_prediction = tf.equal(
tf.argmax(net_output, 1), tf.argmax(y_true, 1))
# %% And now we can look at the mean of our network's correct guesses
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# %% We can tell the tensorflow graph to train w/ gradient descent using
# our loss function and an input learning rate
optimizer = tf.train.GradientDescentOptimizer(
0.01).minimize(cross_entropy)
return [optimizer, accuracy]
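# Added note (not in the original tutorial): the hand-written
# -sum(y * log(softmax(...))) loss above can underflow or overflow for extreme
# logits; tf.nn.softmax_cross_entropy_with_logits is the numerically safer
# TF1-era alternative if you adapt this sketch.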
def train(mnist, net_input, y_true, optimizer, accuracy):
'''
data: mnist
graph: (inputs) net_input, y_true, (outputs) optimizer, accuracy
'''
# %% We now create a new session to actually perform the initialization the
# variables:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# %% Now actually do some training:
batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
for batch_i in range(mnist.train.num_examples // batch_size):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(optimizer, feed_dict={
net_input: batch_xs,
y_true: batch_ys
})
print(sess.run(accuracy,
feed_dict={
net_input: mnist.validation.images,
y_true: mnist.validation.labels
}))
# %% Print final test accuracy:
print(sess.run(accuracy,
feed_dict={
net_input: mnist.test.images,
y_true: mnist.test.labels
}))
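# Added note (not in the original tutorial): plain softmax regression like this
# typically reaches roughly 0.92 test accuracy on MNIST after a few epochs,
# which is a useful sanity check when running the script.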
# %%
if __name__ == '__main__':
''' '''
# load data
mnist = load_data()
# inputs
[net_input, y_true] = inputs_placeholder()
# model
[optimizer, accuracy] = model(net_input, y_true)
# train
train(mnist, net_input, y_true, optimizer, accuracy)
"""
# We could do the same thing w/ Keras like so:
from keras.models import Sequential
model = Sequential()
from keras.layers.core import Dense, Activation
model.add(Dense(output_dim=10, input_dim=784, init='zero'))
model.add(Activation("softmax"))
from keras.optimizers import SGD
model.compile(loss='categorical_crossentropy',
optimizer=SGD(lr=learning_rate))
model.fit(mnist.train.images, mnist.train.labels, nb_epoch=n_epochs,
batch_size=batch_size, show_accuracy=True)
objective_score = model.evaluate(mnist.test.images, mnist.test.labels,
batch_size=100, show_accuracy=True)
"""
| apache-2.0 |
lioritan/Thesis | problems/bruteforce_v2.py | 1 | 22198 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 13 14:08:19 2014
@author: liorf
"""
from numpy import *
from matplotlib.mlab import find
from scipy.stats import mode
import time
def entropy(tags): #this is 0 if all same tag, 1 if uniform, lower=better
    '''computes entropy on tags. Assumes binary 0-1 tagging only (not +-1!).
entropy= sum(-f*log2(f)) where f are the frequencies of each value'''
freqs = bincount(tags)/(1.0*len(tags))
nonzeros= find(freqs !=0)
if size(nonzeros)<= 1:
return 0.0 #edge case
tmp = freqs[nonzeros]
return sum(-tmp*log2(tmp))
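# Added example (comment only, not in the original module): for a perfectly
# mixed node, entropy(array([0, 0, 1, 1])) evaluates to 1.0, while a pure node
# entropy(array([1, 1, 1, 1])) evaluates to 0.0, matching the
# "0 if all same tag, 1 if uniform" note above.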
#TODO: info_gain_ratio for non binary features and possibly need to do AIC/BIC ?
def info_gain(curr_node_tags, feature_values): #0 if same divide, 1 if perfect
'''computes simple info-gain for a split. '''
curr_ent = entropy(curr_node_tags) #current entropy H(T)
#sum over all values: #elements with this value/#total elements * entropy(elements with this value)
cond_ent = 0.0
total_elem_sz = 1.0*len(curr_node_tags)
for value in set(feature_values):
locs= find(feature_values == value)
value_prob = len(locs)/total_elem_sz
cond_ent += value_prob*entropy(curr_node_tags[locs])
return curr_ent- cond_ent
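# Added example (comment only, not in the original module): a feature that
# splits array([0, 0, 1, 1]) exactly by value gives info_gain(...) == 1.0,
# while a constant feature (same value for every element) gives 0.0.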
def is_set_valued(relation,relname):
    # Only the first return below ever executes; the two alternatives are kept
    # (commented out) as dead code for other setups: a yago-specific check and
    # a general but slower check on the relation values.
    return relname.startswith('reverse_') or relname=='type' or relname=='possible_cure' or (relname.startswith('drug_') and not relname=='drug_moiety')
    #return relname.startswith('reverse_') or relname=='type' #yago
    #return isinstance(relation.values()[0], list) #general, slow
def is_in_relation(x, relation,relname, *args):
'''args[0]=optional target
x is a single object. this works fine with [] as param'''
res=[]
flag= is_set_valued(relation,relname)
if flag is False:
for y in x:
bob = relation.get(y)
if bob is not None:
res+=[bob]
else: #relation is reversed
for y in x:
res+= relation.get(y, [])
if len(args)==0:
return res #list of strings
return args[0] in res
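# Added example (comment only, not in the original module), using the toy
# relations defined under __main__ below:
#   is_in_relation(['potato'], relations['country_of_origin'], 'country_of_origin')
#   returns ['chile'], and passing a target argument ('chile') returns True.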
def apply_transforms(relations, transforms, objects):
'''transforms is list of relation+direction pairs.
    objects is a set of objects (set of sets)'''
curr_objs=objects
for relation in transforms:
curr_objs= [is_in_relation(obj, relations[relation], relation) for obj in curr_objs]
return curr_objs
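# Added example (comment only, not in the original module): chaining two
# transforms walks the relations, e.g.
#   apply_transforms(relations, ['country_of_origin', 'continent'], [['potato']])
#   yields [['south_america']] with the toy relations defined under __main__.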
def split_and_subtree(query_chosen, recursive_step_obj):
query_results=array([query_chosen(x) for x in recursive_step_obj.objects])
pos_inds=find(query_results==1)
neg_inds=find(query_results!=1)
recursive_step_obj.left_son= TreeRecursiveSRLStep(recursive_step_obj.objects[neg_inds], recursive_step_obj.tagging[neg_inds], recursive_step_obj.relations, recursive_step_obj.transforms, recursive_step_obj.n, recursive_step_obj.MAX_DEPTH, recursive_step_obj.SPLIT_THRESH, recursive_step_obj.cond)
recursive_step_obj.right_son=TreeRecursiveSRLStep(recursive_step_obj.objects[pos_inds], recursive_step_obj.tagging[pos_inds], recursive_step_obj.relations, recursive_step_obj.transforms, recursive_step_obj.n, recursive_step_obj.MAX_DEPTH, recursive_step_obj.SPLIT_THRESH, recursive_step_obj.cond)
return query_chosen,recursive_step_obj.left_son,recursive_step_obj.right_son
def generate_relational_features(objects, relations, max_depth, n):
    '''create all of the features! 2011 paper? 2012 paper? ESA?'''
pass
#MAX_DEPTH=2 #TODO!
#SPLIT_THRESH=3 #TODO!
MAX_SIZE= 5000 #TODO: change this in future(needed to make it run fast)
IGTHRESH=0.05
#BAD_RELATION=False
class TreeRecursiveSRLStep(object):
def __init__(self, objects, tagging, relations, steps_to_curr, n, MAX_DEPTH, SPLIT_THRESH,cond=False):
self.relations= relations
self.objects =array(objects)
self.tagging=tagging
if len(objects) > 0:
self.chosen_tag= mode(tagging)[0]
else:
self.chosen_query=None
self.justify='no objects'
self.chosen_tag=None
self.transforms= steps_to_curr
self.ig = -1.0
self.chosen_query=None
self.cond=cond
self.n=n
self.MAX_DEPTH=MAX_DEPTH
self.SPLIT_THRESH=SPLIT_THRESH
def pick_split_query(self):
        '''pick one query (if a simple query on the objects gives high IG, use it; otherwise go recursive and build a tree as the query)'''
all_words=set()
for words in self.objects:
for word in words:
all_words.add(word)
max_ig,best_word=-1.0,''
for word in all_words:
word_ig= info_gain(self.tagging, array([1 if (word in obj) else 0 for obj in self.objects]))
if word_ig>=max_ig:
max_ig,best_word=word_ig,word
self.chosen_query, self.ig, self.justify=lambda x: 1 if (best_word in x) else 0, max_ig, 'hasword:'+best_word
if self.cond is True:
return split_and_subtree(self.chosen_query, self)
#Build relation-based features(super table) for objects, see if any query good enough
best_ig, transforms_used, constant= self.ig,[],''
depth=0
transforms=[([],self.n)]
while depth<self.MAX_DEPTH+1:
pagu=[]
new_transforms=[]
#print len(base_vals)
#print depth
for base_transforms,n in transforms:
for relation in self.relations:
if len(base_transforms)>0 and (relation==base_transforms[-1] or relation=='reverse_'+base_transforms[-1] or relation==base_transforms[-1].replace('reverse_','')):
continue #no using the relation you came with on the way back...
feature_vals= apply_transforms(self.relations, base_transforms+[relation],self.objects)
if sum(map(len, feature_vals))==0 : #no objects have relevant values. This may leave us with objects whose feature values are [], which means any query will return false...
continue #not relevant
relation_constants= set()
for obj in feature_vals:
for const in obj:
relation_constants.add(const)
avg_for_rel=0.0
sz=len(relation_constants)
if sz>=MAX_SIZE:
continue #For now, skip.
relation_constants.add(None)
for const in relation_constants:
labels= None
if const is None:
labels= array([1 if len(val)==0 else 0 for val in feature_vals])
else:
labels= array([1 if const in val else 0 for val in feature_vals])
ig_for_const= info_gain(self.tagging,labels)
avg_for_rel+=ig_for_const
if ig_for_const>=best_ig and ig_for_const> IGTHRESH:
best_ig, transforms_used, constant= ig_for_const, base_transforms+[relation],const
new_transforms.append((relation, avg_for_rel/len(relation_constants)))
del feature_vals
del relation_constants
if len(new_transforms)==0:
break #no more recursion
#now we subsample
transforms_pos, avg_igs= zip(*new_transforms)
#transforms_pos, avg_igs= self.filter_bad_rels(transforms_pos, avg_igs)
if len(transforms_pos)==0:
break #no more recursion
avg_igs= array(avg_igs)
transforms_pos= array(transforms_pos, dtype=object)
choices=random.choice(transforms_pos, n, True, avg_igs/sum(avg_igs))
temp={}
for relation in choices:
if temp.has_key(relation):
temp[relation]+=1
continue
temp[relation]=1
for (a,b) in temp.items():
pagu.append((base_transforms+[a],b))
#print transforms
transforms= pagu
depth+=1
#print best_ig, self.ig
if best_ig >= self.ig:
if constant is None:
self.chosen_query= lambda x: 1 if len(apply_transforms(self.relations, transforms_used, [x])[0])==0 else 0
else:
self.chosen_query= lambda x: apply_transforms(self.relations, transforms_used, [x], constant)[0]
self.ig, self.justify= best_ig, str(transforms_used)+' '+str(constant)
if self.ig <= 0: #no query is useful
self.chosen_query=None
print 'big problem'
return None,None,None
return split_and_subtree(self.chosen_query, self)
def filter_bad_rels(self, relations, value_things):
#filter transforms+non-relevant since doesn't apply
#relations-relations I consider moving to
new_rel_fet=[]
new_avg_ig=[]
for i,relation in enumerate(relations):
if relation in self.transforms or 'reverse_'+relation in self.transforms or relation.replace('reverse_','') in self.transforms:
continue
if value_things[i]<=0.0:
continue #ig is 0->no point
barf=False
new_objs=apply_transforms(self.relations, [relation], self.objects)
if sum([len(obj) for obj in new_objs])==0:#all objects are []
continue
for other_rel in self.relations.keys():
if other_rel==relation or other_rel=='reverse_'+relation or other_rel==relation.replace('reverse_',''):
continue
feature_vals=[is_in_relation(obj, self.relations[other_rel]) for obj in new_objs]
val_lens=[len(val) for val in feature_vals]
if sum(val_lens)>0 :
barf=True
break
if barf:
new_rel_fet.append(relation)
new_avg_ig.append(value_things[i])
return new_rel_fet, array(new_avg_ig)
class TreeRecursiveSRLClassifier(object):
def __init__(self, objects, tagging, relations, transforms, n, MAX_DEPTH, SPLIT_THRESH, cond=False):
self.relations= relations
self.objects =objects
self.tagging=tagging
self.transforms=transforms
self.cond=cond
self.n=n
self.MAX_DEPTH=MAX_DEPTH
self.SPLIT_THRESH=SPLIT_THRESH
def train(self):
self.tree_sets= [TreeRecursiveSRLStep(self.objects, self.tagging, self.relations, self.transforms, self.n, self.MAX_DEPTH, self.SPLIT_THRESH, self.cond)] #initally all in same node
for node in self.tree_sets:
if len(node.objects)<=self.SPLIT_THRESH or all(node.tagging==1) or all(node.tagging==0):#consistent/too small to split
node.justify='leafed(thresh/constistant)'
node.chosen_query=None
continue #leaf
_,left, right=node.pick_split_query()
if left is None or right is None:
node.chosen_query=None
continue#another leaf case...
self.tree_sets.append(left)
self.tree_sets.append(right)
self.query_tree=self.tree_sets[0] #root
def predict(self, new_object):
curr_node= self.query_tree
if curr_node.chosen_tag is None:#edge case in the case of consistent
return 0#some arbitrary rule
while curr_node.chosen_query is not None:
if curr_node.right_son.chosen_tag is None: #query splits all to one side
curr_node=curr_node.left_son
continue
if curr_node.left_son.chosen_tag is None: #other side
curr_node=curr_node.right_son
continue
transformed_obj= apply_transforms(curr_node.relations, curr_node.transforms, [new_object])
query_val= curr_node.chosen_query(transformed_obj[0]) #this works
if query_val==1:
curr_node=curr_node.right_son
else:
curr_node=curr_node.left_son
return int(curr_node.chosen_tag)
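# Added usage sketch (comment only, mirrors the __main__ block below):
#   clf = TreeRecursiveSRLClassifier(objects, tags, relations, [], 200, 2, 3)
#   clf.train()
#   predictions = [clf.predict(obj) for obj in test_objects]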
if __name__=='__main__':
#Toy example for debugging
messages=['cocoa price increase in 1964 because of cuban_missile_crisis',
'cocoa kill person according to research from france university',
'rice price worldwide in constant decrease due to export increase from china since 1990',
'pineapple cake serve in oslo_peace_conference',
'apple is not actual forbidden fruit scientist say actually pear instead',
'20 person dead 40 injure in earthquake in turkey',
'u.s. is no longer largest consumer of goods according to survey',
'potato consumption in u.s. increase due new potato craze',
'edward_snoden_leak put nsa in bad spot president barrack_obama to give statement tomorrow',
'dog not allergic to cocoa according to new study from mit',
'ireland_potato_famine memorial day riot cause 4 dead', #hard one since potato is america but still stuff. Mby a noisy example?
'wheat and cucumber consumption on worldwide decline except in u.s.',
'new corn based recipe will rock your word',
'broccoli vote funny word of year read more inside',
'new president of mexico allergic to avocado cannot eat guacamole',
'india origin of moussaka eggplant import to turkey from india',
'oslo_peace_conference best thing ever',
'10 year since oslo_peace_conference what change',
'cuban_missile_crisis cause rise in potato price',
'paris celebrate memorial french_revolution with cake',
'orange most cultivate fruit in world according to recent survey',
'sweet_potato diet increase in popularity due to celebrity endorsement',
'cat allergic to pepper according to new study from mit',
'ginger cost skyrocket due to u.s. sushi craze in los_angeles',
'bible forbid sweet_potato according to rabi from israel',
'2016_olympics possible not take place in brazil but in mexico',
'canada_squash soup recipe popular in u.s.'
] #messages on fruits/veggies that originally from america is concept. have some fruit, some america, some both, some neither
msg_objs=[a.split(' ') for a in messages]
message_labels = (array([1,1,-1,1,-1,-1,-1,1,-1,1,1,-1,1,-1,1,-1,-1,
-1,1,-1,-1,1,1,-1,1,-1,1])+1)/2
test_msgs= ['potato and tomato sound the same and also come from same continent list of 10 things from the new world which surprise',
'2014_israel_president_election soon 6 candidate for title',
'eggplant soup popular in asia',
'pumpkin cost worldwide increase 40 percent during halloween',
'tomato favourite fruit of italy',
'massive news coverage of 2016_olympics expect due to location',
'rice has medical quality according to mit research',
'pumpkin may color urine if consume in large quantity',
'religious riot in the_netherlands',
'cocoa ban in china lifted']#this test set is too hard. pumpkin is impossible, and cocoa_ban is kind of also impossible
test=[a.split(' ') for a in test_msgs]
test_lbl= (array([1,-1,-1,1,1,-1,-1,1,-1,1])+1)/2
vld_msgs=['rome less visit than vatican_city according to census data',
'why the french_revolution help shape the world today',
'sweet_potato famine suspect in ireland connection to ireland_potato_famine suspect',
'doctor treat cancer with salad claim pepper and tomato have medicinal effects',
'russia annex crimea_peninsula president vladimir_putin to make statement',
'fish cost worldwide increase due to over-fishing',
'cocoa flavor orange tree develop in mit',
'pineapple goes well with avocado according to flavor specialist',
'orange orange in the_netherlands',
'corn voted most corny new world food']
vld=[a.split(' ') for a in vld_msgs]
vld_lbls=(array([-1,-1,1,1,-1,-1,1,1,-1,1])+1)/2
relations={}
# relations['type']={'potato':'vegetable', 'cuban_missile_crisis':'event', 'cocoa':'fruit', 'france':'country', 'rice':'cereal', 'china':'country', 'pineapple':'fruit', 'oslo_peace_conference':'event'
# , 'apple':'fruit', 'pear':'fruit', 'turkey':'country', 'u.s.':'country', 'edward_snoden_leak':'event', 'nsa':'organization', 'obama':'person', 'dog':'animal', 'mit':'university',
# 'ireland_potato_famine':'event', 'wheat':'cereal', 'cucumber':'vegetable', 'chile':'country', 'cuba':'country', 'venezuela':'country', 'brazil':'country', 'norway':'country',
# 'italy':'country', 'syria':'country', 'india':'country', 'norway':'country', 'ireland':'country', 'north_america':'continent', 'south_america':'continent', 'europe':'continent',
# 'asia':'continent', 'tomato':'fruit', '2014_israel_president_election':'event', 'israel':'country', 'mexico':'country'}
relations['country_of_origin']={'potato':'chile', 'cocoa':'venezuela', 'rice':'china', 'pineapple':'brazil', 'apple':'turkey', 'pear':'italy', 'wheat':'syria', 'cucumber':'india', 'tomato':'mexico',
'broccoli':'italy', 'corn':'mexico', 'avocado':'mexico', 'eggplant':'india', 'orange':'china', 'sweet_potato':'peru','pumpkin':'u.s.','pepper':'mexico','ginger':'china', 'canada_squash':'canada',
'blarf':'ggg','nof':'fluff','poo':'goffof','fgfgfgfg':'gggg','a':'b', 'r':'f','t':'t'}#applys to fruits/vegs/crops
relations['continent']={'cuba':'south_america', 'france':'europe', 'china':'asia', 'norway':'europe', 'turkey':'asia', 'u.s.':'north_america',
'chile':'south_america', 'venezuela':'south_america', 'brazil':'south_america', 'italy':'europe', 'ireland':'europe', 'syria':'asia', 'india':'asia',
'mexico':'south_america', 'israel':'asia', 'vatican':'europe','russia':'asia', 'peru':'south_america', 'canada':'north_america',
'f':'g','b':'c','ggg':'fff','fluff':'t','t':'t','d':'d'}#apply to country
relations['capital_of']={'paris':'france', 'washington_dc':'u.s.','china':'beijing','mexico':'mexico_city','brazil':'brasilia','cuba':'havana','norway':'oslo','turkey':'ankara','chile':'santiago','venezuela':'caracas','italy':'rome','vatican':'vatican_city','ireland':'dublin','syria':'damascus','india':'new_delhi', 'russia':'muscow',
'f':'f','r':'r','d':'d','q':'p','fff':'ffg'}
relations['city_of']={'paris':'france','los_angeles':'u.s.', 'washington_dc':'u.s.','china':'beijing','mexico':'mexico_city','brazil':'brasilia','cuba':'havana','norway':'oslo','turkey':'ankara','chile':'santiago','venezuela':'caracas','italy':'rome','vatican':'vatican_city','ireland':'dublin','syria':'damascus','india':'new_delhi', 'russia':'muscow',
'f':'f','t':'t','q':'q','p':'p'}
relations['president_of']={'vladimir_putin':'russia','barrack_obama':'u.s.',
'q':'f', 'r':'r', 'f':'f','b':'c','t':'t','d':'d'}
#relations['calorie_content_kcal']={'tomato':18, 'potato':77, 'rice':365, 'pineapple':50, 'apple':52, 'pear':57, 'wheat':327, 'cucumber':16}#apply to fruit/vegetable, numeric. missing for cocoa
relations['happend_in_place']={'cuban_missile_crisis':'cuba', 'oslo_peace_conference':'norway', 'edward_snoden_leak':'u.s.', 'ireland_potato_famine':'ireland', '2014_israel_president_election':'israel','french_revolution':'france','2016_olympics':'brazil', 'cocoa_ban':'china',
'fu':'f','r':'r','b':'b','c':'c','d':'d'}#apply to event(cuba missile crisis)
#relations['happend_on_date']={'cuban_missile_crisis':1962, 'oslo_peace_conference':1993, 'edward_snoden_leak':2013, 'ireland_potato_famine':1845, '2014_israel_president_election':2014} #apply to event, numeric
for key in relations.keys():
new_key= 'reverse_'+key
relations[new_key]= {}
for (a,b) in relations[key].items():
if relations[new_key].has_key(b):
relations[new_key][b].append(a)
continue
relations[new_key][b]= [a]
#now for actual stuff:
# SPLIT_THRESH=4
# blah1=TreeRecursiveSRLClassifier(msg_objs, message_labels, relations, [], True)#no recursive!
# before=time.time()
# blah1.train()
# print time.time()-before
# pred1trn=array([blah1.predict(x) for x in msg_objs])
# print mean(pred1trn!=message_labels)
# pred1tst=array([blah1.predict(x) for x in test])
# print mean(pred1tst!=test_lbl)
# MAX_DEPTH=0
# blah2=TreeRecursiveSRLClassifier(msg_objs, message_labels, relations, [])#no recursive but has relation usage...
# before=time.time()
# blah2.train()
# print time.time()-before
# pred2trn=array([blah2.predict(x) for x in msg_objs])
# print mean(pred2trn!=message_labels)
# pred2tst=array([blah2.predict(x) for x in test])
# print mean(pred2tst!=test_lbl)
blah3=TreeRecursiveSRLClassifier(msg_objs, message_labels, relations, [], 200, 2, 3)
before=time.time()
blah3.train()
print time.time()-before
pred3trn=array([blah3.predict(x) for x in msg_objs])
print mean(pred3trn!=message_labels)
pred3tst=array([blah3.predict(x) for x in test])
    print mean(pred3tst!=test_lbl)
 | gpl-2.0 |
h2educ/scikit-learn | sklearn/feature_selection/tests/test_chi2.py | 221 | 2398 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
rvraghav93/scikit-learn | examples/applications/plot_prediction_latency.py | 13 | 11475 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
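# Added note (not in the original example): with the default configuration
# below, generate_dataset(1000, 100, 100) returns standardized arrays of shape
# (1000, 100) / (100, 100) for X_train / X_test plus matching 1-D targets.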
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
    runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : benchmark configuration dict (supplies the estimator names)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
# #############################################################################
# Main code
start_time = time.time()
# #############################################################################
# Benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
if adjust=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
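# Added note (comment only; values are hypothetical): each row of the returned
# list looks like (d, open, close, high, low, volume), e.g.
# (732677.0, 903.25, 909.73, 910.0, 896.42, 4048270080), with d being a
# matplotlib date number (see date2num).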
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
default to the md5 hash or the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
if adjust=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
default to the md5 hash or the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
        warnings.warn('urlopen() failure fetching data for %s\n%s' % (ticker, exc))
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
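# Added usage sketch (comment only; quote values are hypothetical):
#   import matplotlib.pyplot as plt
#   fig = plt.figure(); ax = fig.add_subplot(111)
#   quotes = [(732677.0, 90.0, 92.5, 93.0, 89.5, 1e6)]
#   candlestick(ax, quotes, width=0.6, colorup='g', colordown='r')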
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
| gpl-3.0 |
aoifeboyle/tardis | docs/physics/plasma/plasma_plots/nebular_ionization_balance.py | 13 | 4485 | import os
from matplotlib import colors
from tardis import atomic, plasma_array, util
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
#Making 2 Figures for ionization balance and level populations
plt.figure(1).clf()
ax1 = plt.figure(1).add_subplot(111)
plt.figure(2).clf()
ax2 = plt.figure(2).add_subplot(111)
# expanding the tilde to the users directory
atom_fname = os.path.expanduser('~/.tardis/si_kurucz.h5')
# reading in the HDF5 File
atom_data = atomic.AtomData.from_hdf5(atom_fname)
#The atom_data needs to be prepared to create indices. The Class needs to know which atomic numbers are needed for the
#calculation and what line interaction is needed (for "downbranch" and "macroatom" the code creates special tables)
atom_data.prepare_atom_data([14], 'scatter')
#Initializing the NebularPlasma class using the from_abundance class method.
#This classmethod is normally only needed to test individual plasma classes
#Usually the plasma class just gets the number densities from the model class
nebular_plasma = plasma_array.NebularPlasma.from_abundance(10000, 0.5, {'Si': 1}, 1e-13, atom_data, 10.)
#Initializing a dataframe to store the ion populations and level populations for the different temperatures
ion_number_densities = pd.DataFrame(index=nebular_plasma.ion_populations.index)
level_populations = pd.DataFrame(index=nebular_plasma.level_populations.ix[14, 1].index)
t_rads = np.linspace(2000, 20000, 100)
#Calculating the different ion populations and level populations for the given temperatures
for t_rad in t_rads:
nebular_plasma.update_radiationfield(t_rad, w=1.0)
#getting total si number density
si_number_density = nebular_plasma.number_density.get_value(14)
#Normalizing the ion populations
ion_density = nebular_plasma.ion_populations / si_number_density
ion_number_densities[t_rad] = ion_density
#normalizing the level_populations for Si II
current_level_population = nebular_plasma.level_populations.ix[14, 1] / nebular_plasma.ion_populations.ix[14, 1]
#normalizing with statistical weight
current_level_population /= atom_data.levels.ix[14, 1].g
level_populations[t_rad] = current_level_population
ion_colors = ['b', 'g', 'r', 'k']
for ion_number in [0, 1, 2, 3]:
current_ion_density = ion_number_densities.ix[14, ion_number]
ax1.plot(current_ion_density.index, current_ion_density.values, '%s-' % ion_colors[ion_number],
label='Si %s W=1.0' % util.int_to_roman(ion_number + 1).upper())
#only plotting every 5th radiation temperature
t_rad_normalizer = colors.Normalize(vmin=2000, vmax=20000)
t_rad_color_map = plt.cm.ScalarMappable(norm=t_rad_normalizer, cmap=plt.cm.jet)
for t_rad in t_rads[::5]:
ax2.plot(level_populations[t_rad].index, level_populations[t_rad].values, color=t_rad_color_map.to_rgba(t_rad))
ax2.semilogy()
#Calculating the different ion populations for the given temperatures with W=0.5
ion_number_densities = pd.DataFrame(index=nebular_plasma.ion_populations.index)
for t_rad in t_rads:
nebular_plasma.update_radiationfield(t_rad, w=0.5)
#getting total si number density
si_number_density = nebular_plasma.number_density.get_value(14)
#Normalizing the ion populations
ion_density = nebular_plasma.ion_populations / si_number_density
ion_number_densities[t_rad] = ion_density
#normalizing the level_populations for Si II
current_level_population = nebular_plasma.level_populations.ix[14, 1] / nebular_plasma.ion_populations.ix[14, 1]
#normalizing with statistical weight
current_level_population /= atom_data.levels.ix[14, 1].g
level_populations[t_rad] = current_level_population
#Plotting the ion fractions
for ion_number in [0, 1, 2, 3]:
    print("w=0.5")
current_ion_density = ion_number_densities.ix[14, ion_number]
ax1.plot(current_ion_density.index, current_ion_density.values, '%s--' % ion_colors[ion_number],
label='Si %s W=0.5' % util.int_to_roman(ion_number + 1).upper())
for t_rad in t_rads[::5]:
ax2.plot(level_populations[t_rad].index, level_populations[t_rad].values, color=t_rad_color_map.to_rgba(t_rad),
linestyle='--')
ax2.semilogy()
t_rad_color_map.set_array(t_rads)
cb = plt.figure(2).colorbar(t_rad_color_map)
ax1.set_xlabel('T [K]')
ax1.set_ylabel('Number Density Fraction')
ax1.legend()
ax2.set_xlabel('Level Number for Si II')
ax2.set_ylabel('Number Density Fraction')
cb.set_label('T [K]')
plt.show() | bsd-3-clause |
xyguo/scikit-learn | sklearn/model_selection/_validation.py | 5 | 36985 | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from ..base import is_classifier, clone
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.fixes import astype
from ..utils.validation import _is_arraylike, _num_samples
from ..externals.joblib import Parallel, delayed, logger
from ..metrics.scorer import check_scoring
from ..exceptions import FitFailedWarning
from ._split import KFold
from ._split import LabelKFold
from ._split import LeaveOneLabelOut
from ._split import LeaveOneOut
from ._split import LeavePLabelOut
from ._split import LeavePOut
from ._split import ShuffleSplit
from ._split import LabelShuffleSplit
from ._split import StratifiedKFold
from ._split import StratifiedShuffleSplit
from ._split import PredefinedSplit
from ._split import check_cv, _safe_split
__all__ = ['cross_val_score', 'cross_val_predict', 'permutation_test_score',
'learning_curve', 'validation_curve']
ALL_CVS = {'KFold': KFold,
'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeaveOneOut': LeaveOneOut,
'LeavePLabelOut': LeavePLabelOut,
'LeavePOut': LeavePOut,
'ShuffleSplit': ShuffleSplit,
'LabelShuffleSplit': LabelShuffleSplit,
'StratifiedKFold': StratifiedKFold,
'StratifiedShuffleSplit': StratifiedShuffleSplit,
'PredefinedSplit': PredefinedSplit}
LABEL_CVS = {'LabelKFold': LabelKFold,
'LeaveOneLabelOut': LeaveOneLabelOut,
'LeavePLabelOut': LeavePLabelOut,
'LabelShuffleSplit': LabelShuffleSplit}
def cross_val_score(estimator, X, y=None, labels=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv.split(X, y, labels))
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def cross_val_predict(estimator, X, y=None, labels=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Examples
--------
>>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# Ensure the estimator has implemented the passed decision function
if not callable(getattr(estimator, method)):
raise AttributeError('{} not implemented in estimator'
.format(method))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, y, labels))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
        True iff sorted(indices) is range(n_samples)
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def permutation_test_score(estimator, X, y, labels=None, cv=None,
n_permutations=100, n_jobs=1, random_state=0,
verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, labels, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state),
labels, cv, scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
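# Illustrative usage sketch (not part of the original module): a typical call
# pattern for permutation_test_score on a small classification dataset.  The
# dataset and estimator below are only assumptions for the example.
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> score, perm_scores, pvalue = permutation_test_score(
#     ...     SVC(kernel='linear'), iris.data, iris.target, cv=5,
#     ...     n_permutations=30, random_state=0)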
def _permutation_test_score(estimator, X, y, labels, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv.split(X, y, labels):
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y; if labels are given, shuffle only within each label group."""
if labels is None:
indices = random_state.permutation(len(y))
else:
indices = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
indices[this_mask] = random_state.permutation(indices[this_mask])
return y[indices]
def learning_curve(estimator, X, y, labels=None,
train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None,
exploit_incremental_learning=False, n_jobs=1,
pre_dispatch="all", verbose=0):
"""Learning curve.
Determines cross-validated training and test scores for different training
set sizes.
A cross-validation generator splits the whole dataset k times in training
and test data. Subsets of the training set with varying sizes will be used
to train the estimator and a score for each training subset size and the
test set will be computed. Afterwards, the scores will be averaged over
all k runs for each training subset size.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
exploit_incremental_learning : boolean, optional, default: False
If the estimator supports incremental learning, this will be
used to speed up fitting for different training set sizes.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_sizes_abs : array, shape = (n_unique_ticks,), dtype int
        Numbers of training examples that have been used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See :ref:`examples/model_selection/plot_learning_curve.py
<example_model_selection_plot_learning_curve.py>`
"""
if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
raise ValueError("An estimator must support the partial_fit interface "
"to exploit incremental learning")
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
cv_iter = cv.split(X, y, labels)
# Make a list since we will be iterating multiple times over the folds
cv_iter = list(cv_iter)
scorer = check_scoring(estimator, scoring=scoring)
n_max_training_samples = len(cv_iter[0][0])
# Because the lengths of folds can be significantly different, it is
# not guaranteed that we use all of the available training data when we
# use the first 'n_max_training_samples' samples.
train_sizes_abs = _translate_train_sizes(train_sizes,
n_max_training_samples)
n_unique_ticks = train_sizes_abs.shape[0]
if verbose > 0:
print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
if exploit_incremental_learning:
classes = np.unique(y) if is_classifier(estimator) else None
out = parallel(delayed(_incremental_fit_estimator)(
clone(estimator), X, y, classes, train, test, train_sizes_abs,
scorer, verbose) for train, test in cv.split(X, y, labels))
else:
out = parallel(delayed(_fit_and_score)(
clone(estimator), X, y, scorer, train[:n_train_samples], test,
verbose, parameters=None, fit_params=None, return_train_score=True)
for train, test in cv_iter
for n_train_samples in train_sizes_abs)
out = np.array(out)[:, :2]
n_cv_folds = out.shape[0] // n_unique_ticks
out = out.reshape(n_cv_folds, n_unique_ticks, 2)
out = np.asarray(out).transpose((2, 1, 0))
return train_sizes_abs, out[0], out[1]
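# Illustrative usage sketch (not part of the original module): learning_curve
# returns the absolute train sizes plus per-fold train/test scores, which are
# usually averaged over folds before plotting.  The dataset and estimator are
# assumptions for the example.
#     >>> import numpy as np
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> sizes, train_scores, test_scores = learning_curve(
#     ...     SVC(kernel='linear'), iris.data, iris.target, cv=5,
#     ...     train_sizes=np.linspace(0.1, 1.0, 5))
#     >>> mean_test_scores = test_scores.mean(axis=1)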
def _translate_train_sizes(train_sizes, n_max_training_samples):
"""Determine absolute sizes of training subsets and validate 'train_sizes'.
Examples:
_translate_train_sizes([0.5, 1.0], 10) -> [5, 10]
_translate_train_sizes([5, 10], 10) -> [5, 10]
Parameters
----------
train_sizes : array-like, shape (n_ticks,), dtype float or int
Numbers of training examples that will be used to generate the
learning curve. If the dtype is float, it is regarded as a
fraction of 'n_max_training_samples', i.e. it has to be within (0, 1].
n_max_training_samples : int
Maximum number of training samples (upper bound of 'train_sizes').
Returns
-------
train_sizes_abs : array, shape (n_unique_ticks,), dtype int
Numbers of training examples that will be used to generate the
learning curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
"""
train_sizes_abs = np.asarray(train_sizes)
n_ticks = train_sizes_abs.shape[0]
n_min_required_samples = np.min(train_sizes_abs)
n_max_required_samples = np.max(train_sizes_abs)
if np.issubdtype(train_sizes_abs.dtype, np.float):
if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0:
raise ValueError("train_sizes has been interpreted as fractions "
"of the maximum number of training samples and "
"must be within (0, 1], but is within [%f, %f]."
% (n_min_required_samples,
n_max_required_samples))
train_sizes_abs = astype(train_sizes_abs * n_max_training_samples,
dtype=np.int, copy=False)
train_sizes_abs = np.clip(train_sizes_abs, 1,
n_max_training_samples)
else:
if (n_min_required_samples <= 0 or
n_max_required_samples > n_max_training_samples):
raise ValueError("train_sizes has been interpreted as absolute "
"numbers of training samples and must be within "
"(0, %d], but is within [%d, %d]."
% (n_max_training_samples,
n_min_required_samples,
n_max_required_samples))
train_sizes_abs = np.unique(train_sizes_abs)
if n_ticks > train_sizes_abs.shape[0]:
        warnings.warn("Removed duplicate entries from 'train_sizes'. Number "
                      "of ticks will be less than the size of "
                      "'train_sizes' (%d instead of %d)."
                      % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning)
return train_sizes_abs
def _incremental_fit_estimator(estimator, X, y, classes, train, test,
train_sizes, scorer, verbose):
"""Train estimator on training subsets incrementally and compute scores."""
train_scores, test_scores = [], []
partitions = zip(train_sizes, np.split(train, train_sizes)[:-1])
for n_train_samples, partial_train in partitions:
train_subset = train[:n_train_samples]
X_train, y_train = _safe_split(estimator, X, y, train_subset)
X_partial_train, y_partial_train = _safe_split(estimator, X, y,
partial_train)
X_test, y_test = _safe_split(estimator, X, y, test, train_subset)
if y_partial_train is None:
estimator.partial_fit(X_partial_train, classes=classes)
else:
estimator.partial_fit(X_partial_train, y_partial_train,
classes=classes)
train_scores.append(_score(estimator, X_train, y_train, scorer))
test_scores.append(_score(estimator, X_test, y_test, scorer))
return np.array((train_scores, test_scores)).T
def validation_curve(estimator, X, y, param_name, param_range, labels=None,
cv=None, scoring=None, n_jobs=1, pre_dispatch="all",
verbose=0):
"""Validation curve.
Determine training and test scores for varying parameter values.
Compute scores for an estimator with different values of a specified
parameter. This is similar to grid search with one parameter. However, this
will also compute training scores and is merely a utility for plotting the
results.
Read more in the :ref:`User Guide <learning_curve>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : string
Name of the parameter that will be varied.
param_range : array-like, shape (n_values,)
The values of the parameter that will be evaluated.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
verbose : integer, optional
Controls the verbosity: the higher, the more messages.
Returns
-------
train_scores : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : array, shape (n_ticks, n_cv_folds)
Scores on test set.
Notes
-----
See
:ref:`examples/model_selection/plot_validation_curve.py
<example_model_selection_plot_validation_curve.py>`
"""
X, y, labels = indexable(X, y, labels)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
verbose=verbose)
out = parallel(delayed(_fit_and_score)(
estimator, X, y, scorer, train, test, verbose,
parameters={param_name: v}, fit_params=None, return_train_score=True)
for train, test in cv.split(X, y, labels) for v in param_range)
out = np.asarray(out)[:, :2]
n_params = len(param_range)
n_cv_folds = out.shape[0] // n_params
out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0))
return out[0], out[1]
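# Illustrative usage sketch (not part of the original module): sweeping a
# single hyper-parameter with validation_curve and averaging the per-fold
# scores.  The dataset, estimator and parameter range are assumptions for the
# example.
#     >>> import numpy as np
#     >>> from sklearn.datasets import load_iris
#     >>> from sklearn.svm import SVC
#     >>> iris = load_iris()
#     >>> train_scores, test_scores = validation_curve(
#     ...     SVC(), iris.data, iris.target, param_name="gamma",
#     ...     param_range=np.logspace(-3, 2, 6), cv=5)
#     >>> mean_test_scores = test_scores.mean(axis=1)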
| bsd-3-clause |
kayak/fireant | fireant/database/base.py | 1 | 13098 | from datetime import datetime
from functools import partial
from typing import Collection, Dict, Union
from typing import Iterable, List, Sequence, Type
import pandas as pd
from pypika import (
Query,
enums,
terms,
)
from pypika import Table, functions as fn
from pypika.queries import QueryBuilder
from pypika.terms import Function
from fireant.dataset.fields import Field
from fireant.dataset.filters import Filter
from fireant.dataset.joins import Join
from fireant.exceptions import QueryCancelled
from fireant.middleware.decorators import apply_middlewares, connection_middleware
from fireant.queries.finders import (
find_totals_dimensions,
find_and_group_references_for_dimensions,
find_required_tables_to_join,
find_joins_for_tables,
)
from fireant.queries.references import make_reference_dimensions, make_reference_metrics, make_reference_filters
from fireant.queries.special_cases import adjust_daterange_filter_for_rolling_window
from fireant.queries.totals_helper import adapt_for_totals_query
from fireant.utils import (
alias_selector,
flatten,
)
from fireant.dataset.intervals import DatetimeInterval
class Database(object):
"""
    This is an abstract base class used for interfacing with a database platform.
"""
# The pypika query class to use for constructing queries
query_cls = Query
slow_query_log_min_seconds = 15
def __init__(
self,
host=None,
port=None,
database=None,
max_result_set_size=200000,
middlewares=[],
):
self.host = host
self.port = port
self.database = database
self.max_result_set_size = max_result_set_size
self.middlewares = middlewares + [connection_middleware]
def connect(self):
"""
This function must establish a connection to the database platform and return it.
"""
raise NotImplementedError
def cancel(self, connection):
"""
Cancel any running query.
"""
if hasattr(connection, "cancel"):
connection.cancel()
else:
# A default cancel for databases for which no specific cancel is implemented
# This will force an exit of the connection context manager
raise QueryCancelled("Query was cancelled")
def get_column_definitions(self, schema, table, connection=None):
"""
Return a list of column name, column data type pairs.
:param schema: The name of the table schema.
:param table: The name of the table to get columns from.
:param connection: (Optional) The connection to execute this query with.
:return: A list of columns.
"""
raise NotImplementedError
def trunc_date(self, field, interval):
"""
This function must create a Pypika function which truncates a Date or DateTime object to a specific interval.
"""
raise NotImplementedError
def date_add(self, field: terms.Term, date_part: str, interval: int):
"""
This function must add/subtract a Date or Date/Time object.
"""
raise NotImplementedError
def convert_date(self, dt: datetime) -> Union[datetime, Function]:
"""
Override to provide a custom function for converting a date.
Defaults to an identity function.
:param dt: Date to convert
"""
return dt
def to_char(self, definition):
return fn.Cast(definition, enums.SqlTypes.VARCHAR)
@apply_middlewares
def fetch_queries(self, *queries, connection=None, parameters: Union[Dict, Collection] = ()):
results = []
# Parameters can either be passed as a list when using formatting placeholders like %s (varies per platform)
# or a dict when using named placeholders.
for query in queries:
cursor = connection.cursor()
cursor.execute(str(query), parameters)
results.append(cursor.fetchall())
return results
def fetch(self, query, **kwargs):
return self.fetch_queries(query, **kwargs)[0]
@apply_middlewares
def execute(self, *queries, **kwargs):
connection = kwargs.get("connection")
for query in queries:
cursor = connection.cursor()
cursor.execute(str(query))
connection.commit()
@apply_middlewares
def fetch_dataframes(self, *queries, parse_dates=None, **kwargs):
connection = kwargs.get("connection")
dataframes = []
for query in queries:
dataframes.append(pd.read_sql(query, connection, coerce_float=True, parse_dates=parse_dates))
return dataframes
def fetch_dataframe(self, query, **kwargs):
return self.fetch_dataframes(query, **kwargs)[0]
def __str__(self):
return f'Database|{self.__class__.__name__}|{self.host}'
def make_slicer_query_with_totals_and_references(
self,
table,
joins,
dimensions,
metrics,
operations,
filters,
references,
orders,
share_dimensions=(),
) -> List[Type[QueryBuilder]]:
"""
The following two loops will run over the spread of the two sets including a NULL value in each set:
- reference group (WoW, MoM, etc.)
- dimension with roll up/totals enabled (totals dimension)
This will result in at least one query where the reference group and totals dimension is NULL, which shall be
        called the base query. The base query will ALWAYS be present, even if there are zero reference groups or totals
dimensions.
For a concrete example, check the test case in :
```
fireant.tests.queries.test_build_dimensions.QueryBuilderDimensionTotalsTests
#test_build_query_with_totals_cat_dimension_with_references
```
"""
filters = adjust_daterange_filter_for_rolling_window(dimensions, operations, filters)
totals_dimensions = find_totals_dimensions(
dimensions,
share_dimensions,
)
totals_dimensions_and_none = [None] + totals_dimensions[::-1]
reference_groups = find_and_group_references_for_dimensions(dimensions, references)
reference_groups_and_none = [(None, None)] + list(reference_groups.items())
queries = []
for totals_dimension in totals_dimensions_and_none:
(dimensions_with_totals, filters_with_totals) = adapt_for_totals_query(
totals_dimension,
dimensions,
filters,
)
for reference_parts, references in reference_groups_and_none:
(dimensions_with_ref, metrics_with_ref, filters_with_ref,) = self.adapt_for_reference_query(
reference_parts,
dimensions_with_totals,
metrics,
filters_with_totals,
references,
)
query = self.make_slicer_query(
table,
joins,
dimensions_with_ref,
metrics_with_ref,
filters_with_ref,
orders,
)
# Add these to the query instance so when the data frames are joined together, the correct references and
                # totals can be applied when combining the separate result sets from each query.
query._totals = totals_dimension
query._references = references
queries.append(query)
return queries
def adapt_for_reference_query(self, reference_parts, dimensions, metrics, filters, references):
if reference_parts is None:
return dimensions, metrics, filters
ref_dim, unit, interval = reference_parts
offset_func = partial(self.date_add, date_part=unit, interval=interval)
offset_func_inv = partial(self.date_add, date_part=unit, interval=-interval)
ref_dimensions = make_reference_dimensions(
dimensions, ref_dim, offset_func, self.transform_field_to_query, self.trunc_date
)
ref_metrics = make_reference_metrics(metrics, references[0].reference_type.alias)
ref_filters = make_reference_filters(filters, ref_dim, offset_func_inv)
return ref_dimensions, ref_metrics, ref_filters
def make_slicer_query(
self,
base_table: Table,
joins: Sequence[Join] = (),
dimensions: Sequence[Field] = (),
metrics: Sequence[Field] = (),
filters: Sequence[Filter] = (),
orders: Sequence = (),
) -> Type[QueryBuilder]:
"""
Creates a pypika/SQL query from a list of slicer elements.
This is the base implementation shared by two implementations: the query to fetch data for a slicer request and
the query to fetch choices for dimensions.
This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed
for the query to fetch choices for dimensions.
The slicer query extends this with metrics, references, and totals.
:param base_table:
pypika.Table - The base table of the query, the one in the FROM clause
:param joins:
A collection of joins available in the slicer. This should include all slicer joins. Only joins required for
the query will be used.
:param dimensions:
A collection of dimensions to use in the query.
:param metrics:
A collection of metrics to use in the query.
:param filters:
A collection of filters to apply to the query.
:param orders:
A collection of orders as tuples of the metric/dimension to order by and the direction to order in.
:return:
"""
query = self.query_cls.from_(base_table, immutable=False)
elements = flatten([metrics, dimensions, filters])
# Add joins
join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)
for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):
query = query.join(join.table, how=join.join_type).on(join.criterion)
# Add dimensions
for dimension in dimensions:
dimension_term = self.transform_field_to_query(dimension, self.trunc_date)
query = query.select(dimension_term)
if dimension.groupable:
query = query.groupby(dimension_term)
# Add filters
for fltr in filters:
query = query.having(fltr.definition) if fltr.is_aggregate else query.where(fltr.definition)
# Add metrics
metric_terms = [self.transform_field_to_query(metric) for metric in metrics]
if metric_terms:
query = query.select(*metric_terms)
        # If an order-by field is not already selected as a metric or
        # dimension, it needs to be added to the query.
select_aliases = {el.alias for el in query._selects}
for (orderby_field, orientation) in orders:
orderby_term = self.transform_field_to_query(orderby_field)
query = query.orderby(orderby_term, order=orientation)
if orderby_term.alias not in select_aliases:
query = query.select(orderby_term)
return query
def make_latest_query(
self,
base_table: Table,
joins: Iterable[Join] = (),
dimensions: Iterable[Field] = (),
):
query = self.query_cls.from_(base_table, immutable=False)
# Add joins
join_tables_needed_for_query = find_required_tables_to_join(dimensions, base_table)
for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):
query = query.join(join.table, how=join.join_type).on(join.criterion)
for dimension in dimensions:
f_dimension_key = alias_selector(dimension.alias)
query = query.select(fn.Max(dimension.definition).as_(f_dimension_key))
return query
def transform_field_to_query(self, field, window=None):
"""
        Makes a pypika term for a given dataset field.
:param field:
A field from a dataset.
:param window:
A window function to apply to the dimension definition if it is a continuous dimension.
:return:
            the pypika term required to select and group by in a SQL query for a given dataset field, aliased with the
            field's alias selector. If the field is a datetime interval and a window function is given, the window is
            applied to the field definition.
"""
f_alias = alias_selector(field.alias)
if window and isinstance(field, DatetimeInterval):
return window(field.definition, field.interval_key).as_(f_alias)
return field.definition.as_(f_alias)
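# Illustrative sketch (not part of the original module): a concrete backend
# mainly needs to supply a DB-API connection plus the date helpers declared
# above.  The sqlite3/pypika usage below is an assumption for illustration
# only; real fireant backends provide their own platform-specific SQL.
#     import sqlite3
#     from pypika import CustomFunction
#     class SQLiteDatabase(Database):
#         def connect(self):
#             return sqlite3.connect(self.database)
#         def trunc_date(self, field, interval):
#             # sqlite has no DATE_TRUNC; strftime is used as a stand-in
#             return CustomFunction('strftime', ['fmt', 'value'])('%Y-%m-%d', field)
#         def date_add(self, field, date_part, interval):
#             return CustomFunction('datetime', ['value', 'modifier'])(
#                 field, '+{} {}'.format(interval, date_part))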
| apache-2.0 |
bikong2/scikit-learn | sklearn/utils/tests/test_class_weight.py | 90 | 12846 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
    # Test that compute_sample_weight raises the expected errors.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
Kramer477/MontePetro | setup.py | 2 | 1931 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='montepetro',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.3.6',
description='Probabilistic Reserve Estimates in Python',
long_description=long_description,
# The project's main homepage.
url='https://github.com/LukasMosser/MontePetro',
download_url = 'https://github.com/LukasMosser/MontePetro/tarball/0.3.6',
# Author details
author='Lukas Mosser',
author_email='[email protected]',
# Choose your license
license='GPL',
# What does your project relate to?
keywords=['montecarlo', 'probabilistic', 'oil', 'gas'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'scipy', 'matplotlib'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require = {
'dev': ['check-manifest'],
'test': ['coverage', 'nose'],
}
) | gpl-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/IPython/core/magics/basic.py | 5 | 21633 | """Implementation of basic magic functions."""
from __future__ import print_function
import io
import sys
from pprint import pformat
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
from IPython.utils.path import unquote_filename
from IPython.utils.py3compat import unicode_type
from IPython.utils.warn import warn, error
class MagicsDisplay(object):
def __init__(self, magics_manager):
self.magics_manager = magics_manager
def _lsmagic(self):
"""The main implementation of the %lsmagic"""
mesc = magic_escapes['line']
cesc = magic_escapes['cell']
mman = self.magics_manager
magics = mman.lsmagic()
out = ['Available line magics:',
mesc + (' '+mesc).join(sorted(magics['line'])),
'',
'Available cell magics:',
cesc + (' '+cesc).join(sorted(magics['cell'])),
'',
mman.auto_status()]
return '\n'.join(out)
def _repr_pretty_(self, p, cycle):
p.text(self._lsmagic())
def __str__(self):
return self._lsmagic()
def _jsonable(self):
"""turn magics dict into jsonable dict of the same structure
replaces object instances with their class names as strings
"""
magic_dict = {}
mman = self.magics_manager
magics = mman.lsmagic()
for key, subdict in magics.items():
d = {}
magic_dict[key] = d
for name, obj in subdict.items():
try:
classname = obj.__self__.__class__.__name__
except AttributeError:
classname = 'Other'
d[name] = classname
return magic_dict
def _repr_json_(self):
return self._jsonable()
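# Illustrative note (assumed output shape, not captured from a live session):
# MagicsDisplay._repr_json_() above returns a plain dict mapping magic kind to
# {magic name: owning Magics subclass name}, e.g. roughly
#     {'line': {'cd': 'OSMagics', 'timeit': 'ExecutionMagics', ...},
#      'cell': {'timeit': 'ExecutionMagics', ...}}
# so frontends can render the magic listing without holding object references.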
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-l', '--line', action='store_true',
help="""Create a line magic alias."""
)
@magic_arguments.argument(
'-c', '--cell', action='store_true',
help="""Create a cell magic alias."""
)
@magic_arguments.argument(
'name',
help="""Name of the magic to be created."""
)
@magic_arguments.argument(
'target',
help="""Name of the existing line or cell magic."""
)
@line_magic
def alias_magic(self, line=''):
"""Create an alias for an existing line or cell magic.
Examples
--------
::
In [1]: %alias_magic t timeit
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
In [2]: %t -n1 pass
1 loops, best of 3: 954 ns per loop
In [3]: %%t -n1
...: pass
...:
1 loops, best of 3: 954 ns per loop
In [4]: %alias_magic --cell whereami pwd
UsageError: Cell magic function `%%pwd` not found.
In [5]: %alias_magic --line whereami pwd
Created `%whereami` as an alias for `%pwd`.
In [6]: %whereami
Out[6]: u'/home/testuser'
"""
args = magic_arguments.parse_argstring(self.alias_magic, line)
shell = self.shell
mman = self.shell.magics_manager
escs = ''.join(magic_escapes.values())
target = args.target.lstrip(escs)
name = args.name.lstrip(escs)
# Find the requested magics.
m_line = shell.find_magic(target, 'line')
m_cell = shell.find_magic(target, 'cell')
if args.line and m_line is None:
raise UsageError('Line magic function `%s%s` not found.' %
(magic_escapes['line'], target))
if args.cell and m_cell is None:
raise UsageError('Cell magic function `%s%s` not found.' %
(magic_escapes['cell'], target))
# If --line and --cell are not specified, default to the ones
# that are available.
if not args.line and not args.cell:
if not m_line and not m_cell:
raise UsageError(
'No line or cell magic with name `%s` found.' % target
)
args.line = bool(m_line)
args.cell = bool(m_cell)
if args.line:
mman.register_alias(name, target, 'line')
print('Created `%s%s` as an alias for `%s%s`.' % (
magic_escapes['line'], name,
magic_escapes['line'], target))
if args.cell:
mman.register_alias(name, target, 'cell')
print('Created `%s%s` as an alias for `%s%s`.' % (
magic_escapes['cell'], name,
magic_escapes['cell'], target))
@line_magic
def lsmagic(self, parameter_s=''):
"""List currently available magic functions."""
return MagicsDisplay(self.shell.magics_manager)
def _magic_docs(self, brief=False, rest=False):
"""Return docstrings from magic functions."""
mman = self.shell.magics_manager
docs = mman.lsmagic_docs(brief, missing='No documentation')
if rest:
format_string = '**%s%s**::\n\n%s\n\n'
else:
format_string = '%s%s:\n%s\n'
return ''.join(
[format_string % (magic_escapes['line'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['line'].items())]
+
[format_string % (magic_escapes['cell'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['cell'].items())]
)
@line_magic
def magic(self, parameter_s=''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
mode = parameter_s.split()[0][1:]
except IndexError:
pass
brief = (mode == 'brief')
rest = (mode == 'rest')
magic_docs = self._magic_docs(brief, rest)
if mode == 'latex':
print(self.format_latex(magic_docs))
return
else:
magic_docs = format_screen(magic_docs)
out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
%timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
%%timeit x = numpy.random.randn((100, 100))
numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
magic_docs,
"Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
str(self.lsmagic()),
]
page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by Olivier Aubert, slightly modified.
# Process options/args
opts, args = self.parse_options(parameter_s, 'r')
raw = 'r' in opts
oname = args and args or '_'
info = self.shell._ofind(oname)
if info['found']:
txt = (raw and str or pformat)( info['obj'] )
page.page(txt)
else:
print('Object `%s` not found' % oname)
@line_magic
def profile(self, parameter_s=''):
"""Print your currently active IPython profile.
See Also
--------
prun : run code using the Python profiler
(:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`)
"""
warn("%profile is now deprecated. Please use get_ipython().profile instead.")
from IPython.core.application import BaseIPythonApplication
if BaseIPythonApplication.initialized():
print(BaseIPythonApplication.instance().profile)
else:
error("profile is an application-level value, but you don't appear to be in an IPython application")
@line_magic
def pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.pprint = bool(1 - ptformatter.pprint)
print('Pretty printing has been turned',
['OFF','ON'][ptformatter.pprint])
@line_magic
def colors(self, parameter_s=''):
"""Switch color scheme for prompts, info system and exception handlers.
Currently implemented schemes: NoColor, Linux, LightBG.
Color scheme names are not case-sensitive.
Examples
--------
To get a plain black and white terminal::
%colors nocolor
"""
def color_switch_err(name):
warn('Error changing %s color schemes.\n%s' %
(name, sys.exc_info()[1]))
new_scheme = parameter_s.strip()
if not new_scheme:
raise UsageError(
"%colors: you must specify a color scheme. See '%colors?'")
# local shortcut
shell = self.shell
if not shell.colors_force:
if sys.platform in {'win32', 'cli'}:
import IPython.utils.rlineimpl as readline
if not readline.have_readline:
msg = """\
Proper color support under MS Windows requires the pyreadline library.
You can find it at:
http://ipython.org/pyreadline.html
Defaulting color scheme to 'NoColor'"""
new_scheme = 'NoColor'
warn(msg)
elif not shell.has_readline:
# Coloured prompts get messed up without readline
# Will remove this check after switching to prompt_toolkit
new_scheme = 'NoColor'
# Set prompt colors
try:
shell.prompt_manager.color_scheme = new_scheme
except:
color_switch_err('prompt')
else:
shell.colors = \
shell.prompt_manager.color_scheme_table.active_scheme_name
# Set exception colors
try:
shell.InteractiveTB.set_colors(scheme = new_scheme)
shell.SyntaxTB.set_colors(scheme = new_scheme)
except:
color_switch_err('exception')
# Set info (for 'object?') colors
if shell.color_info:
try:
shell.inspector.set_active_scheme(new_scheme)
except:
color_switch_err('object inspector')
else:
shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context and Verbose.
If called without arguments, acts as a toggle."""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print('Exception reporting mode:',shell.InteractiveTB.mode)
except:
xmode_switch_err('user')
@line_magic
def quickref(self,arg):
""" Show a quick reference sheet """
from IPython.core.usage import quick_reference
qr = quick_reference + self._magic_docs(brief=True)
page.page(qr)
@line_magic
def doctest_mode(self, parameter_s=''):
"""Toggle doctest mode on and off.
This mode is intended to make IPython behave as much as possible like a
plain Python shell, from the perspective of how its prompts, exceptions
and output look. This makes it easy to copy and paste parts of a
session into doctests. It does so by:
- Changing the prompts to the classic ``>>>`` ones.
- Changing the exception reporting mode to 'Plain'.
- Disabling pretty-printing of output.
Note that IPython also supports the pasting of code snippets that have
leading '>>>' and '...' prompts in them. This means that you can paste
doctests from files or docstrings (even if they have leading
whitespace), and the code will execute correctly. You can then use
'%history -t' to see the translated history; this will give you the
input after removal of all the leading prompts and whitespace, which
can be pasted back into an editor.
With these features, you can switch into this mode easily whenever you
need to do testing and changes to doctests, without having to leave
your existing IPython session.
"""
# Shorthands
shell = self.shell
pm = shell.prompt_manager
meta = shell.meta
disp_formatter = self.shell.display_formatter
ptformatter = disp_formatter.formatters['text/plain']
# dstore is a data store kept in the instance metadata bag to track any
# changes we make, so we can undo them later.
dstore = meta.setdefault('doctest_mode',Struct())
save_dstore = dstore.setdefault
# save a few values we'll need to recover later
mode = save_dstore('mode',False)
save_dstore('rc_pprint',ptformatter.pprint)
save_dstore('xmode',shell.InteractiveTB.mode)
save_dstore('rc_separate_out',shell.separate_out)
save_dstore('rc_separate_out2',shell.separate_out2)
save_dstore('rc_prompts_pad_left',pm.justify)
save_dstore('rc_separate_in',shell.separate_in)
save_dstore('rc_active_types',disp_formatter.active_types)
save_dstore('prompt_templates',(pm.in_template, pm.in2_template, pm.out_template))
if not mode:
# turn on
pm.in_template = '>>> '
pm.in2_template = '... '
pm.out_template = ''
# Prompt separators like plain python
shell.separate_in = ''
shell.separate_out = ''
shell.separate_out2 = ''
pm.justify = False
ptformatter.pprint = False
disp_formatter.active_types = ['text/plain']
shell.magic('xmode Plain')
else:
# turn off
pm.in_template, pm.in2_template, pm.out_template = dstore.prompt_templates
shell.separate_in = dstore.rc_separate_in
shell.separate_out = dstore.rc_separate_out
shell.separate_out2 = dstore.rc_separate_out2
pm.justify = dstore.rc_prompts_pad_left
ptformatter.pprint = dstore.rc_pprint
disp_formatter.active_types = dstore.rc_active_types
shell.magic('xmode ' + dstore.xmode)
# Store new mode and inform
dstore.mode = bool(1-int(mode))
mode_label = ['OFF','ON'][dstore.mode]
print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
can now be enabled at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui qt5 # enable PyQt5 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui gtk3 # enable Gtk3 event loop integration
%gui tk # enable Tk event loop integration
%gui osx # enable Cocoa event loop integration
# (requires %matplotlib 1.1)
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
try:
return self.shell.enable_gui(arg)
except Exception as e:
# print simple error message, rather than traceback if we can't
# hook up the GUI
error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-e', '--export', action='store_true', default=False,
help='Export IPython history as a notebook. The filename argument '
'is used to specify the notebook name and format. For example '
'a filename of notebook.ipynb will result in a notebook name '
'of "notebook" and a format of "json". Likewise using a ".py" '
'file extension will write the notebook as a Python script'
)
@magic_arguments.argument(
'filename', type=unicode_type,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook -e foo.ipynb".
To export the history to "foo.py" do "%notebook -e foo.py".
"""
args = magic_arguments.parse_argstring(self.notebook, s)
from nbformat import write, v4
args.filename = unquote_filename(args.filename)
if args.export:
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(args.filename, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
| mit |
florianhauer/ompl | demos/VFRRT/plotConservative.py | 4 | 2679 | #!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Caleb Voss and Wilson Beebe
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Authors: Caleb Voss, Wilson Beebe
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import *
from matplotlib import cm
def potential(x, y):
return 1 + np.sin(x) * np.sin(y)
def potentialSurface():
X = np.arange(-8, 8, 0.25)
Y = np.arange(-8, 8, 0.25)
X, Y = np.meshgrid(X, Y)
Z = potential(X, Y)
return X, Y, Z
fig = plt.figure()
ax = fig.gca(projection='3d', aspect='equal')
X, Y, Z = potentialSurface()
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0)
x = np.loadtxt("vfrrt-conservative.path")
ax.plot(x[:,0], x[:,1], potential(x[:,0], x[:,1]), color='b')
x = np.loadtxt("trrt-conservative.path")
ax.plot(x[:,0], x[:,1], potential(x[:,0], x[:,1]), color='r')
x = np.loadtxt("rrtstar-conservative.path")
ax.plot(x[:,0], x[:,1], potential(x[:,0], x[:,1]), color='g')
plt.show()
| bsd-3-clause |
hsinhuang/AliDMCompetition | lr/pred.py | 1 | 3962 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import sys
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.abspath(os.path.join(current_dir, '..', 'data'))
sys.path.append(data_path)
import prep
class LR:
"""
Parameters
----------
model: An object which has interfaces `fit' and `predict'
alpha: float, optional(default=1.0)
Penalty factor with respect to time
degree: int, optional(default=2)
Penalty degree on time interval
"""
def __init__(self, model, alpha=1.0, degree=2):
self.__poly_kernel__ = time_poly(alpha=alpha, n=degree)
self.__model__ = model
def fit(self, X):
self.__data__ = X
def predict(self, time_now):
X, y = extract_feature(self.__data__, self.__poly_kernel__, get_train_instances, time_now)
self.__model__.fit(X, y)
pred_X, ub = extract_feature(self.__data__, self.__poly_kernel__, get_pred_instance, time_now)
y = self.__model__.predict(pred_X)
predictions = ub[y == 1]
return predictions, np.ones((len(predictions,)))
def get_model():
from sklearn.svm import LinearSVC
return LR(model=LinearSVC(C=10, loss='l1'), alpha=0.7, degree=1)
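# Hedged usage sketch (added for illustration; the tiny data set, the day
# numbers, and the assumption of an older scikit-learn release in which
# LinearSVC(loss='l1') is still accepted are not part of the original
# competition code).  Columns follow the layout consumed by sort_by() and
# time_poly() below: [user_id, brand_id, action_type, visit_day], with
# action_type 1 meaning "buy".
def _demo_lr_usage():
    data = np.array([
        [1, 10, 0, 1],   # user 1 clicks brand 10 on day 1
        [1, 10, 0, 2],   # clicks again on day 2
        [1, 10, 1, 5],   # buys on day 5
        [1, 10, 0, 40],  # clicks again much later
    ])
    model = get_model()
    model.fit(data)
    # Predict which (user_id, brand_id) pairs look likely to buy as of day 60.
    pairs, scores = model.predict(time_now=60)
    return pairs, scores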
def sort_by(data, order=['user_id', 'brand_id', 'visit_datetime']):
actype = np.dtype({
'names': ['user_id', 'brand_id', 'type', 'visit_datetime'],
'formats': [np.long, np.long, np.int, np.int]
})
typed_data = np.zeros(len(data), dtype=actype)
for i in range(len(data)):
typed_data[i] = tuple(data[i])
typed_data.sort(order=order)
for i in range(len(typed_data)):
td = typed_data[i]
data[i][0] = td['user_id']
data[i][1] = td['brand_id']
data[i][2] = td['type']
data[i][3] = td['visit_datetime']
def time_poly(alpha=1.0, n=2):
def poly(data, end_date):
click_inf = 0.
buy_inf = 0.
favo_inf = 0.
cart_inf = 0.
for i in data:
inf = 1./(1+alpha*(end_date - i[3])**n)
if i[2] == 0:
click_inf += inf
elif i[2] == 1:
buy_inf += inf
elif i[2] == 2:
favo_inf += inf
elif i[2] == 3:
cart_inf += inf
return np.array([click_inf, buy_inf, favo_inf, cart_inf])
return poly
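# Added worked example: with the defaults used in get_model() above
# (alpha=0.7, n=1), an action that happened 10 days before end_date
# contributes 1 / (1 + 0.7 * 10) = 0.125 to its action-type bucket, while an
# action on the same day contributes 1.0, so recent behaviour dominates the
# feature vector without any hard cut-off.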
def use_kernel(kernel, data, bound_date, not_util=False):
y = 1
if not_util:
data = data[data[:, 3] < bound_date - 30]
if data.shape[0] == 0:
return None
if not_util or bound_date - data[-1, 3] > 30:
y = 0
return kernel(data, bound_date), y
def get_train_instances(ub_data, kernel, bound_date):
whether_buy = ub_data[:, 2] == 1
xs = []
ys = []
i = 0
while i < len(whether_buy):
buy_ix = i + whether_buy[i:].argmax()
if buy_ix == 0 and whether_buy[buy_ix]:
i += 1
continue
if whether_buy[buy_ix]:
x, y = use_kernel(kernel, ub_data[:buy_ix], ub_data[buy_ix, 3])
xs.append(x)
ys.append(y)
i = buy_ix + 1
else:
rec = use_kernel(kernel, ub_data, bound_date, not_util=True)
if rec is not None:
x, y = rec
xs.append(x)
ys.append(y)
break
return xs, ys
def get_pred_instance(ub_data, kernel, bound_date):
return [kernel(ub_data, bound_date)], [np.array([ub_data[0, 0], ub_data[0, 1]])]
def extract_feature(data, kernel, get_instances, bound_date):
sort_by(data)
xs = []
ys = []
for ui in np.unique(data[:, 0]):
u_data = data[data[:, 0] == ui]
for bi in np.unique(u_data[:, 1]):
ub_data = u_data[u_data[:, 1] == bi]
xs_, ys_ = get_instances(ub_data, kernel, bound_date)
xs += xs_
ys += ys_
return np.array(xs), np.array(ys)
| gpl-2.0 |
vossman/ctfeval | appionlib/apCtf/sinefit.py | 1 | 12375 | #!/usr/bin/env python
import math
import time
import numpy
import scipy.stats
from appionlib import apDisplay
from appionlib.apImage import imagestat
from appionlib.apCtf import ctftools, genctf, leastsq
#===================================================
#===================================================
#===================================================
def refineAmplitudeContrast(radial_array, defocus, normPSD, cs, wavelength, weights=None, msg=True):
"""
	takes elliptically averaged PSD data and fits it to the equation
	C*cos(2*gamma) + D*sin(2*gamma), from which the amplitude contrast follows
"""
if msg is True:
print "resolution limits %.2f <> %.2f"%(1.0e10/radial_array.max(), 1.0e10/radial_array.min())
# create X matrix
radialsq = radial_array**2
if msg is True:
print 1.0/radial_array[-1], wavelength, defocus, cs
gamma = ( -0.5 * math.pi * cs * wavelength**3 * radialsq**2
+ math.pi * wavelength * radialsq * defocus )
cosvec = numpy.cos(2*gamma) #C
sinvec = numpy.sin(2*gamma) #D
onevec = numpy.ones(gamma.shape) #extra constant
X = numpy.array([cosvec, sinvec, onevec, radialsq]).transpose()
#del cosvec, sinvec, gamma
# create weighted matrix
if weights is None:
# make an identity matrix for no weights
weights = numpy.ones(normPSD.shape[0])
# adjust y values
yprime = (normPSD - normPSD.mean())
yprime /= numpy.abs(yprime).max()
## solve it
beta = leastsq.totalLeastSquares(X, yprime, weights)
if beta is None:
beta = leastsq.numpyLeastSquares(X, yprime)
del X, weights
if beta is None:
apDisplay.printWarning("Least squares failed")
return None
#translate the values
C = beta[0]
D = beta[1]
constant = beta[2]
sqterm = beta[3]
if msg is True:
print beta, radial_array.shape
psi = 0.5*math.atan2(C,D)
if msg is True:
print "psi=", psi
phi = psi + math.pi/4
if msg is True:
print "phi=", phi
amp_con = math.sin(phi)
if msg is True:
apDisplay.printColor("amplitude contrast = %.8f"%(amp_con), "cyan")
fitctf1 = C*cosvec + D*sinvec
fitctf2 = numpy.sin(2*gamma + 2*psi)
newB = math.sqrt(1 - amp_con**2)
# need to do the y' = 2 y - 1
adjctf1 = 2 * numpy.power(amp_con*numpy.cos(gamma) + newB*numpy.sin(gamma), 2) - 1
#adjctf2 = 2 * numpy.power(numpy.sin(gamma + math.asin(amp_con)), 2) - 1
crosscorr = scipy.stats.pearsonr(fitctf2, adjctf1)[0]
yprime2 = yprime - constant - sqterm*radialsq
yprime2 /= numpy.abs(yprime2).max()
fitconf = scipy.stats.pearsonr(yprime2, fitctf2)[0]
if msg is True:
from matplotlib import pyplot
pyplot.clf()
pyplot.plot(radialsq, yprime2, '.', color="gray")
pyplot.plot(radialsq, yprime2, 'k-',)
pyplot.plot(radialsq, fitctf1, 'r--',)
pyplot.plot(radialsq, fitctf2, 'g--',)
pyplot.plot(radialsq, adjctf1, 'b--',)
conf1 = scipy.stats.pearsonr(yprime2, fitctf1)[0]
conf2 = scipy.stats.pearsonr(yprime2, adjctf1)[0]
conf3 = scipy.stats.pearsonr(yprime2, fitctf2)[0]
print "conf %.4f, %.4f, %.4f; cc = %.4f"%(conf1, conf2, conf3, crosscorr)
#pyplot.ylim(ymin=-1.05, ymax=1.05)
pyplot.title("Amplitude Contrast Fit (%.2f, %.2f, %.2f) CC=%.3f"%(conf1, conf2, conf3, crosscorr))
pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
bottom=0.05, left=0.05, top=0.95, right=0.95, )
pyplot.show()
if crosscorr < -0.6:
print "likely 180 degree out of phase"
apDisplay.printWarning("Bad angle translation: %.8f"%(amp_con))
return None
if fitconf < 0.1 and amp_con > 0.4:
apDisplay.printWarning("Bad fit confidence %.3f, ac=%.8f"%(fitconf, amp_con))
return None
if crosscorr < 0.5:
apDisplay.printWarning("Bad angle translation: %.8f"%(amp_con))
return None
if amp_con < 0.0:
apDisplay.printWarning("amp contrast is negative (reduce defocus): %.4f"%(amp_con))
#return None
if amp_con > 0.6:
apDisplay.printWarning("amp contrast is too large (increase defocus): %.8f"%(amp_con))
#return None
return amp_con
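# Added sanity check (illustration only, not called by the pipeline): the
# linear fit above works because, with psi = 0.5*atan2(C, D),
#     C*cos(2*gamma) + D*sin(2*gamma) == sin(2*gamma + 2*psi)
# and the amplitude-contrast form of the CTF obeys
#     2*(A*cos(gamma) + B*sin(gamma))**2 - 1 == sin(2*gamma + 2*psi)
# when A = sin(psi + pi/4) and B = sqrt(1 - A**2)
# (holds whenever cos(psi + pi/4) >= 0, e.g. the default psi below).
def _checkAmplitudeContrastIdentity(psi=0.2):
	gamma = numpy.linspace(0, 6*math.pi, 500)
	C = math.sin(2*psi)
	D = math.cos(2*psi)
	linear = C*numpy.cos(2*gamma) + D*numpy.sin(2*gamma)
	A = math.sin(psi + math.pi/4)
	B = math.sqrt(1 - A**2)
	ctfform = 2*numpy.power(A*numpy.cos(gamma) + B*numpy.sin(gamma), 2) - 1
	assert numpy.allclose(linear, ctfform)
	return True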
#===================================================
#===================================================
#===================================================
def refineCTFOneDimension(radial_array, amp_con, zavg, normPSD, cs, wavelength, weights=None, msg=True):
"""
	takes a 1D normalized PSD profile and refines the amplitude contrast
	and average defocus using a linear least squares fit
all values in meters
"""
apDisplay.printColor("BEFORE ac=%.3f, zavg=%.3e"%(amp_con, zavg), "cyan")
print cs, wavelength
print "resolution limits %.2f <> %.2f"%(1.0e10/radial_array.max(), 1.0e10/radial_array.min())
### convert parameters
C = math.sin(math.asin(amp_con) - math.pi/4.)
D = math.sqrt(1 - C**2)
### create astigmatic gamma function
radialsq_array = radial_array**2
gamma_array = ( -0.5*math.pi * cs * wavelength**3 * radialsq_array**2
+ math.pi * wavelength * radialsq_array * zavg )
### create refinement vectors
cosvec = numpy.cos(2*gamma_array) #C
sinvec = numpy.sin(2*gamma_array) #D
onevec = numpy.ones(radialsq_array.shape)
dCTFdGamma_array = -2*C*sinvec + 2*D*cosvec
zavgvec = wavelength*math.pi*radialsq_array * dCTFdGamma_array
### create X data matrix and adjust
X = numpy.array([cosvec, sinvec, zavgvec, onevec, radialsq_array]).transpose()
# create weighted matrix
if weights is None:
# make an identity matrix for no weights
weights = numpy.ones(normPSD.shape[0])
# adjust y values
yprime = (normPSD - normPSD.mean())
yprime /= numpy.abs(yprime).max()
## solve it
beta = leastsq.totalLeastSquares(X, yprime, weights)
if beta is None:
beta = leastsq.numpyLeastSquares(X, yprime)
del X, weights
if beta is None:
apDisplay.printWarning("Least squares failed")
return None
#translate the values
C = beta[0]
D = beta[1]
dzavg = beta[2]
constant = beta[3]
sqterm = beta[4]
print beta
psi = 0.5*math.atan2(C,D)
print "psi=", psi
phi = psi + math.pi/4
print "phi=", phi
amp_con = math.sin(phi)
if dzavg/zavg > 1:
apDisplay.printWarning("Bad defocus change: %.4e --> %.4e"%(zavg, zavg+dzavg))
return None
zavg += dzavg
print "AFTER ac=%.3f, zavg=%.3e"%(amp_con, zavg)
apDisplay.printColor("AFTER ac=%.3f, zavg=%.3e"%(amp_con, zavg), "cyan")
newGamma = ( -0.5*math.pi * cs * wavelength**3 * radialsq_array**2
+ math.pi * wavelength * radialsq_array * zavg )
fitctf1 = C*cosvec + D*sinvec
fitctf1b = numpy.sin(2*gamma_array + 2*psi)
fitctf2 = numpy.sin(2*newGamma + 2*psi)
newB = math.sqrt(1 - amp_con**2)
# need to do the y' = 2 y - 1
adjctf1 = 2 * numpy.power(amp_con*numpy.cos(newGamma) + newB*numpy.sin(newGamma), 2) - 1
crosscorr = scipy.stats.pearsonr(fitctf2, adjctf1)[0]
if crosscorr < -0.6:
print "likely 180 degree out of phase"
apDisplay.printWarning("Bad angle translation: %.8f"%(amp_con))
if msg is True:
from matplotlib import pyplot
pyplot.clf()
yprime2 = yprime - constant - sqterm*radialsq_array
yprime2 /= numpy.abs(yprime2).max()
pyplot.plot(radialsq_array, yprime2, '.', color="gray")
pyplot.plot(radialsq_array, yprime2, 'k-',)
pyplot.plot(radialsq_array, fitctf1b, 'r--',)
pyplot.plot(radialsq_array, fitctf2, 'g--',)
pyplot.plot(radialsq_array, adjctf1, 'b--',)
conf1 = scipy.stats.pearsonr(yprime2, fitctf1b)[0]
conf2 = scipy.stats.pearsonr(yprime2, adjctf1)[0]
conf3 = scipy.stats.pearsonr(yprime2, fitctf2)[0]
#pyplot.ylim(ymin=-1.05, ymax=1.05)
pyplot.title("CTF Refine 1D Fit (%.2f, %.2f, %.2f) CC=%.3f"%(conf1, conf2, conf3, crosscorr))
pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
bottom=0.05, left=0.05, top=0.95, right=0.95, )
pyplot.show()
if crosscorr < 0.5:
apDisplay.printWarning("Bad angle translation: %.8f"%(amp_con))
return None
if zavg > 20e-6 or zavg < 0.1e-6:
apDisplay.printWarning("Bad defocus change: %.4e --> %.4e"%(zavg-dzavg, zavg))
return None
if amp_con < 0.0:
apDisplay.printWarning("amp contrast is negative (reduce defocus): %.4f"%(amp_con))
#return None
if amp_con > 0.6:
apDisplay.printWarning("amp contrast is too large (increase defocus): %.8f"%(amp_con))
#return None
return amp_con, zavg
#===================================================
#===================================================
#===================================================
def refineCTF(radial_array, angle_array,
amp_con, z1, z2, angle_astig,
normPSD, cs, wavelength, refineFlags=(1,1,1,1), weights=None, msg=True):
"""
	takes a 2D normalized PSD and refines all CTF parameters
	using a linear least squares fit
all values in meters
"""
print "BEFORE ac=%.3f, z1=%.3e, z2=%.3e, astig=%.1f"%(amp_con, z1, z2, angle_astig)
print cs, wavelength
print "resolution limits %.2f <> %.2f"%(1.0e10/radial_array.max(), 1.0e10/radial_array.min())
### convert parameters
C = math.sin(math.asin(amp_con) - math.pi/4.)
D = math.sqrt(1 - C**2)
zavg = (z1 + z2)/2.0
zdiff = z2 - z1
if abs(zdiff) < 1e-9:
# this prevents singular matrices
zdiff = 1e-9
astigrad = math.radians(angle_astig)
### create astigmatic gamma function
radialsq_array = radial_array**2
astigcos_array = numpy.cos(2.0*(angle_array - astigrad))
defocus_array = zavg - zdiff/2.0 * astigcos_array
gamma_array = ( -0.5*math.pi * cs * wavelength**3 * radialsq_array**2
+ math.pi * wavelength * radialsq_array * defocus_array )
del defocus_array, radial_array
### create refinement vectors
cosvec = numpy.cos(2*gamma_array) #C
sinvec = numpy.sin(2*gamma_array) #D
dCTFdGamma_array = -2*C*sinvec + 2*D*cosvec
onevec = numpy.ones(radialsq_array.shape)
zavgvec = wavelength*math.pi*radialsq_array * dCTFdGamma_array
zdiffvec = -0.5*zavgvec * astigcos_array
zastigvec = zavgvec * zdiff * numpy.sin(2.0*(angle_array- astigrad))
del gamma_array, astigcos_array, dCTFdGamma_array
### create X data matrix and adjust y values
#X = numpy.array([cosvec, sinvec]).transpose()
X = numpy.vstack([cosvec, sinvec])
if refineFlags[0] == 1:
X = numpy.vstack([X, zavgvec])
if refineFlags[1] == 1:
X = numpy.vstack([X, zdiffvec])
if refineFlags[2] == 1:
X = numpy.vstack([X, zastigvec])
X = numpy.vstack([X, onevec, radialsq_array])
X = X.transpose()
del cosvec, sinvec, zavgvec, zdiffvec, zastigvec, angle_array
# create weighted matrix
if weights is None:
# make an identity matrix for no weights
weights = numpy.ones(normPSD.shape[0])
# adjust y values
yprime = 2 * normPSD - 1
## solve it
beta = leastsq.totalLeastSquares(X, yprime, weights)
if beta is None:
beta = leastsq.numpyLeastSquares(X, yprime)
del X, weights
if beta is None:
apDisplay.printWarning("Least squares failed")
return None
#translate the values
index = 0
C = beta[index]
index += 1
D = beta[index]
index += 1
if refineFlags[0] == 1:
dzavg = beta[index]
print "dzavg", dzavg
index += 1
else:
dzavg = 0
if refineFlags[1] == 1:
dzdiff = beta[index]
index += 1
print "dzdiff", dzdiff
else:
dzdiff = 0
if refineFlags[2] == 1:
		dtheta = beta[index] % (2*math.pi)
index += 1
print "dtheta", dtheta
else:
dtheta = 0
constant = beta[index]
index += 1
sqterm = beta[index]
index += 1
if refineFlags[3] == 1:
psi = 0.5*math.atan2(C,D)
phi = psi + math.pi/4
amp_con = math.sin(phi)
zavg += dzavg
zdiff += dzdiff
if zdiff < 0:
zdiff = 0
z1 = zavg - zdiff/2
z2 = zavg + zdiff/2.
if refineFlags[2] == 1:
astigrad += dtheta
angle_astig = math.degrees(astigrad)
print "AFTER ac=%.3f, z1=%.3e, z2=%.3e, astig=%.1f"%(amp_con, z1, z2, angle_astig)
if msg is True:
from matplotlib import pyplot
args = numpy.argsort(radialsq_array)
radialsq_array = radialsq_array[args]
yprime = yprime[args]
pyplot.clf()
yprime2 = yprime - constant - sqterm*radialsq_array
yprime2 /= numpy.abs(yprime2).max()
newGamma = ( -0.5*math.pi * cs * wavelength**3 * radialsq_array**2
+ math.pi * wavelength * radialsq_array * zavg )
newB = math.sqrt(1 - amp_con**2)
adjctf1 = 2 * numpy.power(amp_con*numpy.cos(newGamma) + newB*numpy.sin(newGamma), 2) - 1
pyplot.plot(radialsq_array, yprime2, '.', color="gray")
#pyplot.plot(radialsq_array, yprime2, 'k-',)
pyplot.plot(radialsq_array, adjctf1, 'b--',)
pyplot.title("CTF Refine 2D Fit")
pyplot.subplots_adjust(wspace=0.05, hspace=0.05,
bottom=0.05, left=0.05, top=0.95, right=0.95, )
pyplot.show()
if amp_con < 0.0:
apDisplay.printWarning("amp contrast is negative (reduce defocus): %.4f"%(amp_con))
#return None
if amp_con > 0.5:
apDisplay.printWarning("amp contrast is too large (increase defocus): %.8f"%(amp_con))
#return None
return amp_con, z1, z2, angle_astig
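# Hedged note on how these routines appear to fit together (added for clarity,
# not present in the original module): refineAmplitudeContrast() estimates the
# amplitude contrast from an elliptical average at fixed defocus,
# refineCTFOneDimension() additionally refines the mean defocus, and
# refineCTF() refines amplitude contrast, both defoci and the astigmatism angle
# against the full 2D power spectrum, with the refineFlags tuple selecting
# which of those parameters are updated.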
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/backends/qt_editor/figureoptions.py | 4 | 6227 | # -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# see the mpl licenses directory for a copy of the license
"""Module that provides a GUI-based editor for matplotlib's figure options"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os.path as osp
import matplotlib.backends.qt_editor.formlayout as formlayout
from matplotlib.backends.qt_compat import QtGui
from matplotlib import markers
from matplotlib.colors import colorConverter, rgb2hex
def get_icon(name):
import matplotlib
basedir = osp.join(matplotlib.rcParams['datapath'], 'images')
return QtGui.QIcon(osp.join(basedir, name))
LINESTYLES = {'-': 'Solid',
'--': 'Dashed',
'-.': 'DashDot',
':': 'Dotted',
'none': 'None',
}
DRAWSTYLES = {'default': 'Default',
'steps': 'Steps',
}
MARKERS = markers.MarkerStyle.markers
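# Hedged usage note: this editor is normally launched from the Qt navigation
# toolbar's "edit parameters" button, but with a Qt backend active it can also
# be invoked directly on an axes object, e.g. figure_edit(ax, parent=fig.canvas)
# (the call form here is illustrative; only the axes argument is required).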
def figure_edit(axes, parent=None):
"""Edit matplotlib figure options"""
sep = (None, None) # separator
has_curve = len(axes.get_lines()) > 0
# Get / General
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
general = [('Title', axes.get_title()),
sep,
(None, "<b>X-Axis</b>"),
('Min', xmin), ('Max', xmax),
('Label', axes.get_xlabel()),
('Scale', [axes.get_xscale(), 'linear', 'log']),
sep,
(None, "<b>Y-Axis</b>"),
('Min', ymin), ('Max', ymax),
('Label', axes.get_ylabel()),
('Scale', [axes.get_yscale(), 'linear', 'log']),
sep,
('(Re-)Generate automatic legend', False),
]
# Save the unit data
xconverter = axes.xaxis.converter
yconverter = axes.yaxis.converter
xunits = axes.xaxis.get_units()
yunits = axes.yaxis.get_units()
if has_curve:
# Get / Curves
linedict = {}
for line in axes.get_lines():
label = line.get_label()
if label == '_nolegend_':
continue
linedict[label] = line
curves = []
linestyles = list(six.iteritems(LINESTYLES))
drawstyles = list(six.iteritems(DRAWSTYLES))
markers = list(six.iteritems(MARKERS))
curvelabels = sorted(linedict.keys())
for label in curvelabels:
line = linedict[label]
color = rgb2hex(colorConverter.to_rgb(line.get_color()))
ec = rgb2hex(colorConverter.to_rgb(line.get_markeredgecolor()))
fc = rgb2hex(colorConverter.to_rgb(line.get_markerfacecolor()))
curvedata = [('Label', label),
sep,
(None, '<b>Line</b>'),
('Line Style', [line.get_linestyle()] + linestyles),
('Draw Style', [line.get_drawstyle()] + drawstyles),
('Width', line.get_linewidth()),
('Color', color),
sep,
(None, '<b>Marker</b>'),
('Style', [line.get_marker()] + markers),
('Size', line.get_markersize()),
('Facecolor', fc),
('Edgecolor', ec),
]
curves.append([curvedata, label, ""])
# make sure that there is at least one displayed curve
has_curve = bool(curves)
datalist = [(general, "Axes", "")]
if has_curve:
datalist.append((curves, "Curves", ""))
def apply_callback(data):
"""This function will be called to apply changes"""
if has_curve:
general, curves = data
else:
general, = data
# Set / General
title, xmin, xmax, xlabel, xscale, ymin, ymax, ylabel, yscale, \
generate_legend = general
if axes.get_xscale() != xscale:
axes.set_xscale(xscale)
if axes.get_yscale() != yscale:
axes.set_yscale(yscale)
axes.set_title(title)
axes.set_xlim(xmin, xmax)
axes.set_xlabel(xlabel)
axes.set_ylim(ymin, ymax)
axes.set_ylabel(ylabel)
# Restore the unit data
axes.xaxis.converter = xconverter
axes.yaxis.converter = yconverter
axes.xaxis.set_units(xunits)
axes.yaxis.set_units(yunits)
axes.xaxis._update_axisinfo()
axes.yaxis._update_axisinfo()
if has_curve:
# Set / Curves
for index, curve in enumerate(curves):
line = linedict[curvelabels[index]]
label, linestyle, drawstyle, linewidth, color, \
marker, markersize, markerfacecolor, markeredgecolor \
= curve
line.set_label(label)
line.set_linestyle(linestyle)
line.set_drawstyle(drawstyle)
line.set_linewidth(linewidth)
line.set_color(color)
                if marker != 'none':
line.set_marker(marker)
line.set_markersize(markersize)
line.set_markerfacecolor(markerfacecolor)
line.set_markeredgecolor(markeredgecolor)
# re-generate legend, if checkbox is checked
if generate_legend:
draggable = None
ncol = 1
if axes.legend_ is not None:
old_legend = axes.get_legend()
draggable = old_legend._draggable is not None
ncol = old_legend._ncol
new_legend = axes.legend(ncol=ncol)
if new_legend:
new_legend.draggable(draggable)
# Redraw
figure = axes.get_figure()
figure.canvas.draw()
data = formlayout.fedit(datalist, title="Figure options", parent=parent,
icon=get_icon('qt4_editor_options.svg'),
apply=apply_callback)
if data is not None:
apply_callback(data)
| mit |
ChinaQuants/bokeh | setup.py | 1 | 19865 | """Setup script for Bokeh."""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import os, platform, re, shutil, site, subprocess, sys, time
from os.path import abspath, dirname, exists, isdir, join, realpath, relpath
from shutil import copy
try:
import colorama
def bright(text): return "%s%s%s" % (colorama.Style.BRIGHT, text, colorama.Style.RESET_ALL)
def dim(text): return "%s%s%s" % (colorama.Style.DIM, text, colorama.Style.RESET_ALL)
def white(text): return "%s%s%s" % (colorama.Fore.WHITE, text, colorama.Style.RESET_ALL)
def blue(text): return "%s%s%s" % (colorama.Fore.BLUE, text, colorama.Style.RESET_ALL)
def red(text): return "%s%s%s" % (colorama.Fore.RED, text, colorama.Style.RESET_ALL)
def green(text): return "%s%s%s" % (colorama.Fore.GREEN, text, colorama.Style.RESET_ALL)
def yellow(text): return "%s%s%s" % (colorama.Fore.YELLOW, text, colorama.Style.RESET_ALL)
except ImportError:
def bright(text): return text
def dim(text): return text
def white(text) : return text
def blue(text) : return text
def red(text) : return text
def green(text) : return text
def yellow(text) : return text
if 'nightly' in sys.argv:
from setuptools import setup
sys.argv.remove('nightly')
with open('__conda_version__.txt', 'r') as f:
version = f.read().rstrip()
vers_file = os.path.join('bokeh', '__conda_version__.py')
with open(vers_file, 'w') as f:
f.write("conda_version=" + "'" + version + "'")
else:
from distutils.core import setup
from distutils import dir_util
# Our own imports
import versioneer
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
ROOT = dirname(realpath(__file__))
BOKEHJSROOT = join(ROOT, 'bokehjs')
BOKEHJSBUILD = join(BOKEHJSROOT, 'build')
CSS = join(BOKEHJSBUILD, 'css')
JS = join(BOKEHJSBUILD, 'js')
SERVER = join(ROOT, 'bokeh/server')
if sys.version_info[0] < 3:
input = raw_input
# -----------------------------------------------------------------------------
# Local utilities
# -----------------------------------------------------------------------------
versioneer.versionfile_source = 'bokeh/_version.py'
versioneer.versionfile_build = 'bokeh/_version.py'
versioneer.tag_prefix = '' # tags are like 1.2.0
versioneer.parentdir_prefix = 'Bokeh-' # dirname like 'myproject-1.2.0'
# -----------------------------------------------------------------------------
# Classes and functions
# -----------------------------------------------------------------------------
copy("LICENSE.txt", "bokeh/")
package_data = ['LICENSE.txt']
def package_path(path, filters=()):
if not os.path.exists(path):
raise RuntimeError("packaging non-existent path: %s" % path)
elif os.path.isfile(path):
package_data.append(relpath(path, 'bokeh'))
else:
for path, dirs, files in os.walk(path):
path = relpath(path, 'bokeh')
for f in files:
if not filters or f.endswith(filters):
package_data.append(join(path, f))
# You can't install Bokeh in a virtualenv because the lack of getsitepackages()
# This is an open bug: https://github.com/pypa/virtualenv/issues/355
# And this is an intended PR to fix it: https://github.com/pypa/virtualenv/pull/508
# Workaround to fix our issue: https://github.com/bokeh/bokeh/issues/378
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python)."""
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
prefixes = [sys.prefix, sys.exec_prefix]
sitepackages = []
seen = set()
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys.prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version_info[0] >= 3:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
sitepackages.append(os.path.abspath(sitedir))
sitepackages = [p for p in sitepackages if os.path.isdir(p)]
return sitepackages
def check_remove_bokeh_install(site_packages):
bokeh_path = join(site_packages, "bokeh")
if not (exists(bokeh_path) and isdir(bokeh_path)):
return
prompt = "Found existing bokeh install: %s\nRemove it? [y|N] " % bokeh_path
val = input(prompt)
if val == "y":
print("Removing old bokeh install...", end=" ")
try:
shutil.rmtree(bokeh_path)
print("Done")
except (IOError, OSError):
print("Unable to remove old bokeh at %s, exiting" % bokeh_path)
sys.exit(-1)
else:
print("Not removing old bokeh install")
sys.exit(1)
def remove_bokeh_pth(path_file):
if exists(path_file):
try:
os.remove(path_file)
except (IOError, OSError):
print("Unable to remove old path file at %s, exiting" % path_file)
sys.exit(-1)
return True
return False
BUILD_EXEC_FAIL_MSG = bright(red("Failed.")) + """
ERROR: subprocess.Popen(%r) failed to execute:
%s
Have you run `npm install` from the bokehjs subdirectory?
For more information, see the Dev Guide:
http://bokeh.pydata.org/en/latest/docs/dev_guide.html
"""
BUILD_FAIL_MSG = bright(red("Failed.")) + """
ERROR: 'gulp build' returned error message:
%s
"""
BUILD_SIZE_FAIL_MSG = """
ERROR: could not determine sizes:
%s
"""
BUILD_SUCCESS_MSG = bright(green("Success!")) + """
Build output:
%s"""
def build_js():
print("Building BokehJS... ", end="")
sys.stdout.flush()
os.chdir('bokehjs')
if sys.platform != "win32":
cmd = [join('node_modules', '.bin', 'gulp'), 'build']
else:
cmd = [join('node_modules', '.bin', 'gulp.cmd'), 'build']
t0 = time.time()
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
print(BUILD_EXEC_FAIL_MSG % (cmd, e))
sys.exit(1)
finally:
os.chdir('..')
result = proc.wait()
t1 = time.time()
if result != 0:
indented_msg = ""
msg = proc.stderr.read().decode('ascii', errors='ignore')
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_FAIL_MSG % red(msg))
sys.exit(1)
indented_msg = ""
msg = proc.stdout.read().decode('ascii', errors='ignore')
pat = re.compile(r"(\[.*\]) (.*)", re.DOTALL)
for line in msg.strip().split("\n"):
stamp, txt = pat.match(line).groups()
indented_msg += " " + dim(green(stamp)) + " " + dim(txt) + "\n"
msg = "\n".join([" " + x for x in msg.split("\n")])
print(BUILD_SUCCESS_MSG % indented_msg)
print("Build time: %s" % bright(yellow("%0.1f seconds" % (t1-t0))))
print()
print("Build artifact sizes:")
try:
def size(*path):
return os.stat(join("bokehjs", "build", *path)).st_size / 2**10
print(" - bokeh.js : %6.1f KB" % size("js", "bokeh.js"))
print(" - bokeh.css : %6.1f KB" % size("css", "bokeh.css"))
print(" - bokeh.min.js : %6.1f KB" % size("js", "bokeh.min.js"))
print(" - bokeh.min.css : %6.1f KB" % size("css", "bokeh.min.css"))
print(" - bokeh-widgets.js : %6.1f KB" % size("js", "bokeh-widgets.js"))
print(" - bokeh-widgets.css : %6.1f KB" % size("css", "bokeh-widgets.css"))
print(" - bokeh-widgets.min.js : %6.1f KB" % size("js", "bokeh-widgets.min.js"))
print(" - bokeh-widgets.min.css : %6.1f KB" % size("css", "bokeh-widgets.min.css"))
except Exception as e:
print(BUILD_SIZE_FAIL_MSG % e)
def install_js():
target_jsdir = join(SERVER, 'static', 'js')
target_cssdir = join(SERVER, 'static', 'css')
STATIC_ASSETS = [
join(JS, 'bokeh.js'),
join(JS, 'bokeh.min.js'),
join(CSS, 'bokeh.css'),
join(CSS, 'bokeh.min.css'),
]
if not all([exists(a) for a in STATIC_ASSETS]):
print("""
ERROR: Cannot install BokehJS: files missing in `./bokehjs/build`.
Please build BokehJS by running setup.py with the `--build_js` option.
Dev Guide: http://bokeh.pydata.org/docs/dev_guide.html#bokehjs.
""")
sys.exit(1)
if exists(target_jsdir):
shutil.rmtree(target_jsdir)
shutil.copytree(JS, target_jsdir)
if exists(target_cssdir):
shutil.rmtree(target_cssdir)
shutil.copytree(CSS, target_cssdir)
def clean():
print("Removing prior-built items...", end=" ")
build_dir = 'build/lib/bokeh'
if os.path.exists(build_dir):
dir_util.remove_tree(build_dir)
for root, dirs, files in os.walk('.'):
for item in files:
if item.endswith('.pyc'):
os.remove(os.path.join(root, item))
print("Done")
def get_user_jsargs():
print("""
Bokeh includes a JavaScript library (BokehJS) that has its own
build process. How would you like to handle BokehJS:
1) build and install fresh BokehJS
2) install last built BokehJS from bokeh/bokehjs/build
""")
mapping = {"1": True, "2": False}
value = input("Choice? ")
while value not in mapping:
print("Input '%s' not understood. Valid choices: 1, 2\n" % value)
value = input("Choice? ")
return mapping[value]
def parse_jsargs():
options = ('install', 'develop', 'sdist', 'egg_info', 'build')
installing = any(arg in sys.argv for arg in options)
if '--build_js' in sys.argv:
if not installing:
print("Error: Option '--build_js' only valid with 'install', 'develop', 'sdist', or 'build', exiting.")
sys.exit(1)
jsbuild = True
sys.argv.remove('--build_js')
elif '--install_js' in sys.argv:
# Note that --install_js can be used by itself (without sdist/install/develop)
jsbuild = False
sys.argv.remove('--install_js')
else:
if installing:
jsbuild = get_user_jsargs()
else:
jsbuild = False
return jsbuild
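# Hedged usage note (added for clarity): the options handled above correspond
# to invocations such as
#     python setup.py install --build_js    # rebuild BokehJS, then install it
#     python setup.py develop --install_js  # reuse the output already in bokehjs/build
# With neither flag given, an interactive prompt (get_user_jsargs) asks which
# behaviour to use.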
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Aliases for build_js and install_js
for i in range(len(sys.argv)):
if sys.argv[i] == '--build-js':
sys.argv[i] = '--build_js'
if sys.argv[i] == '--install-js':
sys.argv[i] = '--install_js'
# Set up this checkout or source archive with the right BokehJS files.
if sys.version_info[:2] < (2, 6):
raise RuntimeError("Bokeh requires python >= 2.6")
# Lightweight command to only install js and nothing more - developer mode
if len(sys.argv) == 2 and sys.argv[-1] == '--install_js':
install_js()
sys.exit(0)
# check for 'sdist' and make sure we always do a BokehJS build when packaging
if "sdist" in sys.argv:
if "--install_js" in sys.argv:
print("Removing '--install_js' incompatible with 'sdist'")
sys.argv.remove('--install_js')
if "--build_js" not in sys.argv:
print("Adding '--build_js' required for 'sdist'")
sys.argv.append('--build_js')
# check for package install, set jsinstall to False to skip prompt
jsinstall = True
if not exists(join(ROOT, 'MANIFEST.in')):
if "--build_js" in sys.argv or "--install_js" in sys.argv:
print("BokehJS source code is not shipped in sdist packages; "
"building/installing from the bokehjs source directory is disabled. "
"To build or develop BokehJS yourself, you must clone the full "
"Bokeh repository from https://github.com/bokeh/bokeh")
if "--build_js" in sys.argv:
sys.argv.remove('--build_js')
if "--install_js" in sys.argv:
sys.argv.remove('--install_js')
jsbuild = False
jsinstall = False
else:
jsbuild = parse_jsargs()
if jsbuild:
build_js()
if jsinstall:
install_js()
sampledata_suffixes = ('.csv', '.conf', '.gz', '.json', '.png', '.ics')
package_path(join(SERVER, 'static'))
package_path(join(SERVER, '_templates'))
package_path(join(ROOT, 'bokeh', '_templates'))
package_path(join(ROOT, 'bokeh', 'sampledata'), sampledata_suffixes)
package_path(join(ROOT, 'bokeh', 'server', 'redis.conf'))
scripts = ['bokeh-server', 'websocket_worker.py']
if '--user' in sys.argv:
site_packages = site.USER_SITE
else:
site_packages = getsitepackages()[0]
path_file = join(site_packages, "bokeh.pth")
path = abspath(dirname(__file__))
print()
if 'develop' in sys.argv:
check_remove_bokeh_install(site_packages)
with open(path_file, "w+") as f:
f.write(path)
print("Installing Bokeh for development:")
print(" - writing path '%s' to %s" % (path, path_file))
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % yellow("PACKAGED"))
sys.exit()
elif 'clean' in sys.argv:
clean()
elif 'install' in sys.argv:
pth_removed = remove_bokeh_pth(path_file)
print("Installing Bokeh:")
if pth_removed:
print(" - removed path file at %s" % path_file)
if jsinstall:
print(" - using %s built BokehJS from bokehjs/build\n" % (bright(yellow("NEWLY")) if jsbuild else bright(yellow("PREVIOUSLY"))))
else:
print(" - using %s BokehJS, located in 'bokeh.server.static'\n" % bright(yellow("PACKAGED")))
elif '--help' in sys.argv:
if jsinstall:
print("Bokeh-specific options available with 'install' or 'develop':")
print()
print(" --build_js build and install a fresh BokehJS")
print(" --install_js install only last previously built BokehJS")
else:
print("Bokeh is using PACKAGED BokehJS, located in 'bokeh.server.static'")
print()
print()
REQUIRES = [
'six>=1.5.2',
'requests>=1.2.3',
'PyYAML>=3.10',
'python-dateutil>=2.1',
'Jinja2>=2.7',
'numpy>=1.7.1',
'pandas>=0.11.0',
'Flask>=0.10.1',
'pyzmq>=14.3.1',
'tornado>=4.0.1',
]
_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()
# Horrible hack: workaround to allow creation of bdist_wheel on pip installation
# Why, for God's sake, is pip forcing the generation of wheels when installing a package?
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError as e:
    # pip does not ask for bdist_wheel when the wheel package is not installed
bdist_wheel = None
if bdist_wheel is not None:
_cmdclass["bdist_wheel"] = bdist_wheel
setup(
name='bokeh',
version=_version,
cmdclass=_cmdclass,
packages=[
'bokeh',
'bokeh.models',
'bokeh.models.tests',
'bokeh.models.widgets',
'bokeh.charts',
'bokeh.charts.builders',
'bokeh.charts.builders.tests',
'bokeh.charts.tests',
'bokeh._legacy_charts',
'bokeh._legacy_charts.builder',
'bokeh._legacy_charts.builder.tests',
'bokeh._legacy_charts.tests',
'bokeh.compat',
'bokeh.compat.mplexporter',
'bokeh.compat.mplexporter.renderers',
'bokeh.crossfilter',
'bokeh.sampledata',
'bokeh.server',
'bokeh.server.models',
'bokeh.server.storage',
'bokeh.server.tests',
'bokeh.server.utils',
'bokeh.server.views',
'bokeh.server.websocket',
'bokeh.server.zmq',
'bokeh.sphinxext',
'bokeh.tests',
'bokeh.transforms',
'bokeh.util',
'bokeh.util.tests',
'bokeh.validation',
],
package_data={'bokeh': package_data},
author='Continuum Analytics',
author_email='[email protected]',
url='http://github.com/bokeh/bokeh',
description='Statistical and novel interactive HTML plots for Python',
license='New BSD',
scripts=scripts,
zip_safe=False,
install_requires=REQUIRES
)
| bsd-3-clause |
Achuth17/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
JT5D/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 8 | 2178 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same across tasks. The multi-task
lasso imposes that features selected at one time point are
selected for all time points. This makes feature selection by the
Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import pylab as pl
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = pl.figure(figsize=(8, 5))
pl.subplot(1, 2, 1)
pl.spy(coef_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'Lasso')
pl.subplot(1, 2, 2)
pl.spy(coef_multi_task_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
pl.figure()
pl.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
pl.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
pl.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
pl.legend(loc='upper center')
pl.axis('tight')
pl.ylim([-1.1, 1.1])
pl.show()
| bsd-3-clause |
johnmwalters/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
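# Hedged usage sketch (not part of the original module): given two sequences
# of pregnancy lengths, the permutation test above is run roughly like this:
#
#   data = firsts.prglngth.values, others.prglngth.values
#   ht = DiffMeansPermute(data)
#   p_value = ht.PValue(iters=1000)
#
# RunTests() and main() below exercise exactly this pattern on the NSFG data.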
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| gpl-3.0 |
jefftc/changlab | genomicode/filelib.py | 1 | 31555 | """
Functions:
lwrite Write to a handle, locking it to prevent concurrent writing.
tswrite Write to a handle with a timestamp.
openfh Open a file name or handle.
safe_unlink Unlink file only if it exists.
safe_mkdir Make a directory only if it does not exist.
which Find full path of executable program or return None
which_assert Find full path or raise AssertionError.
exists Whether a filename exists. Also checks for .gz and .bz2.
exists_nz Whether a filename exists and has non-zero size.
exists_nz_many
assert_exists_many
assert_exists_nz
assert_exists_nz_many
assert_exists_z_many
fp_exists_nz Whether a file or directory exists and is not empty.
dir_exists
list_files_in_path Return all files under a path.
get_file_or_path_size Get size of all files in a directory.
symlink_file_or_path_to_path
copy_file_or_path_to_path
read_row Read one row from a tab-delimited table from a file.
write_row Save one row from a tab-delimited table to a file.
read_cols Iterate over columns of a matrix.
split_by Split a column.
join_by Join a column.
iter_by Group sequential records based on some key value.
as_dict Convert records into a dictionary.
"""
import os, sys
# _lock
# _unlock
#
# _parse_format
# _read_fmt2fn
# _write_fmt2fn
# _make_convert_fns
class GenericObject:
def __init__(self, **keywds):
#import traceback; traceback.print_stack()
for name, value in keywds.iteritems():
setattr(self, name, value)
def __repr__(self):
# Bug: Does not properly quote strings.
x = [" %s=%s" % (n, v) for (n, v) in self.__dict__.iteritems()]
x = ",\n".join(x)
return "GenericObject(\n%s\n )" % x
def _lock(handle):
import fcntl
import time
return # XXX
fileno = handle.fileno()
start = time.time()
while 1:
try:
fcntl.lockf(fileno, fcntl.LOCK_EX)
except Exception, x:
if str(x).find('No record locks available') < 0 or \
time.time() >= start + 600: # try to lock for 10 minutes
raise
else:
break
def _unlock(handle):
import fcntl
return # XXX
fileno = handle.fileno()
fcntl.lockf(fileno, fcntl.LOCK_UN)
def lwrite(s, handle=None):
if handle is None:
handle = sys.stdout
_lock(handle)
try:
handle.write(s)
handle.flush()
finally:
_unlock(handle)
def nlwrite(s, handle=None):
if handle is None:
handle = sys.stdout
handle.write(s)
def tswrite(s, handle=None, format="%m/%d/%Y %H:%M:%S", write_fn=lwrite):
import time
time_tup = time.localtime(time.time())
now = time.strftime(format, time_tup)
write_fn("%s\t%s" % (now, s), handle=handle)
# Fix this. Use the new subprocess code.
def _my_popen(cmd):
if sys.hexversion >= 0x02040000:
from subprocess import Popen, PIPE
p = Popen(
cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,
close_fds=True, universal_newlines=True)
w, r, e = p.stdin, p.stdout, p.stderr
else:
w, r, e = os.popen3(cmd)
return w, r, e
def openfh(file_or_handle, mode='rU'):
    # Note: This used to mishandle newlines.  Most of the utilities
    # here split only on newlines (\n), so Mac-formatted files that
    # have only carriage returns (\r) did not work correctly.  That
    # has been fixed; this now uses the subprocess module.
if type(file_or_handle) is not type(''):
# If this is not a string, assume it's already a file handle.
return file_or_handle
elif file_or_handle.lower().startswith("http"):
# Looks like a URL.
import urllib2
return urllib2.urlopen(file_or_handle)
elif file_or_handle.lower().endswith(".gz"):
if "r" in mode:
# May cause broken pipes. Thus, capture stderr and get
# rid of it.
#return os.popen("zcat %s" % file_or_handle)
if not os.path.exists(file_or_handle):
raise IOError, "File does not exist: %r" % file_or_handle
# If file isn't finished reading, should close it so
# process doesn't stick around half done.
cmd = "gunzip -c '%s'" % file_or_handle
w, r, e = _my_popen(cmd)
w.close()
e.close()
return r
else:
import gzip
return gzip.open(file_or_handle, mode)
elif file_or_handle.lower().endswith(".bz2"):
if "r" in mode:
if not os.path.exists(file_or_handle):
raise IOError, "File does not exist: %s" % file_or_handle
cmd = "bzcat '%s'" % file_or_handle
w, r, e = _my_popen(cmd)
w.close()
e.close()
return r
else:
raise NotImplementedError
elif file_or_handle.lower().endswith(".xz"):
if "r" in mode:
if not os.path.exists(file_or_handle):
raise IOError, "File does not exist: %s" % file_or_handle
cmd = "xzcat '%s'" % file_or_handle
w, r, e = _my_popen(cmd)
w.close()
e.close()
return r
else:
raise NotImplementedError
elif file_or_handle.lower().endswith(".zip"):
if "r" in mode:
if not os.path.exists(file_or_handle):
raise IOError, "File does not exist: %s" % file_or_handle
cmd = "unzip -p '%s'" % file_or_handle
w, r, e = _my_popen(cmd)
w.close()
e.close()
return r
else:
raise NotImplementedError
elif file_or_handle.lower().endswith(".xls") or \
file_or_handle.lower().endswith(".xlsx"):
assert os.path.exists(file_or_handle), "File not found: %s" % \
file_or_handle
# BUG: If this isn't actually an excel file (e.g. a text file
# with .xls extension), will return an empty file.
cmd = "xls2txt '%s'" % file_or_handle
w, r, e = _my_popen(cmd)
# This may block. Just ignore this until we can figure out
# how to fix it properly.
#x = e.read()
#assert not x, x
w.close()
e.close()
return r
return open(file_or_handle, mode)
def which(program):
is_jar = program.lower().endswith(".jar")
def is_exe(fpath):
return os.path.exists(fpath) and (os.access(fpath, os.X_OK) or is_jar)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
def which_assert(binary):
# Make sure a binary exists and return its realpath.
which_binary = which(binary)
assert which_binary, "Executable not found: %s" % binary
return which_binary
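# Hedged usage sketch (paths are illustrative only):
#
#   which("gunzip")         # -> e.g. "/usr/bin/gunzip", or None if not on PATH
#   which_assert("gunzip")  # -> same path, or raises AssertionError if missing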
def assert_exists(filename):
assert exists(filename), "File not found: %s" % filename
def exists(filename):
if type(filename) is type("") and filename.lower().startswith("http"):
# Looks like a URL.
import urllib2
try:
urllib2.urlopen(filename)
except urllib2.HTTPError:
return None
return filename
EXTS = [".gz", ".bz2", ".zip"]
name, ext = os.path.splitext(filename)
if ext.lower() in EXTS:
filename = name
if os.path.exists(filename):
return filename
for ext in EXTS:
f = filename + ext
if os.path.exists(f):
return f
return None
def exists_nz(filename):
import stat
fn = exists(filename)
if not fn:
return None
if os.stat(fn)[stat.ST_SIZE] > 0:
return fn
return None
def filesize(filename):
import stat
return os.stat(filename)[stat.ST_SIZE]
def assert_exists_nz(filename):
import os
if exists_nz(filename):
return
if not os.path.exists(filename):
raise AssertionError, "File not found: %s" % filename
raise AssertionError, "File empty: %s" % filename
def exists_nz_many(filenames):
# Check if multiple files exist.
for filename in filenames:
if not exists_nz(filename):
return False
return True
def assert_exists_nz_many(filenames, retries=2):
# Assert that multiple filenames exists and is non-zero.
import time
# Number of seconds between attempts.
DELAY = 2
assert retries >= 0
# Sometimes, if a file was just written, this will erroneously
# trigger an error. If the files don't exist, wait a few seconds
# and try again.
num_tries = 0
while num_tries <= retries:
if exists_nz_many(filenames):
return
time.sleep(DELAY)
num_tries += 1
missing = []
for filename in filenames:
if not exists_nz(filename):
missing.append(filename)
if not missing:
return
if len(missing) == 1:
msg = "File not found or empty: %s" % missing[0]
elif len(missing) < 5:
x = map(repr, missing)
msg = "Files not found or empty: %s" % ", ".join(x)
else:
x = missing[:5]
        x = map(repr, x)
x = x + ["..."]
msg = "Files (%d) not found or empty: %s" % (
len(missing), ", ".join(x))
assert not missing, msg
def assert_exists_z_many(filenames):
# Assert that multiple filenames exists and are zero.
import stat
assert_exists_many(filenames)
# Make sure they're all 0.
nonzero = []
for filename in filenames:
if os.stat(filename)[stat.ST_SIZE] != 0:
nonzero.append(filename)
if not nonzero:
return
if len(nonzero) == 1:
msg = "File not empty: %s" % nonzero[0]
elif len(nonzero) < 5:
msg = "Files not empty: %s" % ", ".join(nonzero)
else:
x = nonzero[:5] + ["..."]
msg = "Files (%d) not empty: %s" % (
len(nonzero), ", ".join(x))
assert not nonzero, msg
def assert_exists_many(filenames):
# Assert that multiple filenames exists and is non-zero.
missing = []
for filename in filenames:
if not exists(filename):
missing.append(filename)
if not missing:
return
if len(missing) == 1:
msg = "File not found or empty: %s" % missing[0]
elif len(missing) < 5:
msg = "Files not found or empty: %s" % ", ".join(missing)
else:
x = missing[:5] + ["..."]
msg = "Files (%d) not found or empty: %s" % (
len(missing), ", ".join(x))
assert not missing, msg
def fp_exists_nz(file_or_path):
if os.path.isdir(file_or_path):
if os.listdir(file_or_path):
return True
return False
return exists_nz(file_or_path)
def dir_exists(path):
import os
if not os.path.isdir(path):
return False
if not os.path.exists(path):
return False
return True
def _parse_format(format):
"""Return names, format."""
assert format is not None
if format.find(":") < 0:
assert " " not in format
return None, format
names, fmt = [], ""
for x in format.split():
name, type_ = x.split(":")
assert len(type_) == 1, "Invalid type: %s" % type_
names.append(name)
fmt += type_
assert len(names) == len(fmt)
return names, fmt
def _read_fmt2fn(f):
# Return a function that can convert this format, or None.
if f == 's':
return None
elif f in ['d', 'i']:
return int
elif f == 'l':
return long
elif f == 'f':
return float
elif f == 'x':
return None
raise ValueError, "Unknown format specifier %s" % f
def _write_fmt2fn(f):
# Return a function that can convert this format, or None.
if f == 's':
return str
elif f in ['d', 'i']:
return str
elif f == 'l':
return str
elif f == 'f':
return str
elif f == 'x':
return None
raise ValueError, "Unknown format specifier %s" % f
def _make_convert_fns(format, obj_convert_fns, fmt2fn_fn):
obj_convert_fns = list(obj_convert_fns)
convert_fns = []
for f in format:
if f == "O":
fn = obj_convert_fns.pop(0)
else:
fn = fmt2fn_fn(f)
convert_fns.append(fn)
return convert_fns
def _read_with_pandas(handle, delimiter, nrows):
# Read a matrix (list of lists) with pandas. Return None if not
# able to do so (e.g. if pandas not installed).
# Don't know why, but this has a hard time reading VCF files.
# 1. If CHUNKSIZE is 64k, then will only read the 20 lines (that
# start with "##").
# 2. If CHUNKSIZE is 4, will read all lines, but only keep first
# column.
#
# Does too much weird stuff. Don't use it.
return None
try:
import pandas
except ImportError, x:
return None
# Sometimes segfaults:
# pandas.io.common.CParserError: Error tokenizing data. C error:
# out of memory
# chunksize is the number of rows.
CHUNKSIZE = 64*1024 # Read 64k rows at a time.
    # Using nrows together with chunksize is not implemented in pandas.
if nrows is not None:
CHUNKSIZE = None
reader = pandas.read_csv(
handle, sep=delimiter, skip_blank_lines=False, header=None,
comment=None,
na_filter=False, doublequote=False, error_bad_lines=False,
warn_bad_lines=False, low_memory=False, memory_map=True,
nrows=nrows, dtype=str, chunksize=CHUNKSIZE)
if CHUNKSIZE is None:
df = reader
matrix = [list(x) for x in df.values]
else:
matrix = []
for df in reader:
x = [list(x) for x in df.values]
matrix.extend(x)
return matrix
# Bug: if the file is gzip'd, will leave gunzip -c processes lying
# around.
def read_cols(file_or_handle, delimiter="\t", skip=0, nrows=None):
import csv
# Skip the first lines.
handle = openfh(file_or_handle)
for i in range(skip):
handle.readline()
matrix = _read_with_pandas(handle, delimiter, nrows)
if matrix:
for x in matrix:
yield x
elif False and delimiter == "\t":
# iolib.split_tdf takes about the same amount of time as
# csv.reader.
for i, x in enumerate(iolib.split_tdf(handle.read())):
if nrows is not None and i >= nrows:
break
yield x
elif False:
# Default naive implementation.
for i, line in enumerate(handle):
if nrows is not None and i >= nrows:
break
cols = line.rstrip("\r\n").split(delimiter)
yield cols
handle.close()
else:
# Use the Python csv parser.
# Allow up to 64Mb fields (Python 2.5 and above).
# Have problems reading bam files with smaller limit:
# field larger than field limit (33554432)
FIELD_SIZE_LIMIT = 32*1024*1024
if hasattr(csv, "field_size_limit"):
csv.field_size_limit(FIELD_SIZE_LIMIT)
# Read each line.
handle = csv.reader(handle, delimiter=delimiter)
row = None
try:
for i, row in enumerate(handle):
if nrows is not None and i >= nrows:
break
#if i > 5000: # For debugging
# break
yield row
except csv.Error, x:
if str(x).find("field larger than field limit"):
if row is not None:
print "row %d limit %d" % (i, FIELD_SIZE_LIMIT)
raise
def read_all_cols(file_or_handle, delimiter="\t", skip=0, nrows=None):
import csv
# Skip the first lines.
handle = openfh(file_or_handle)
for i in range(skip):
handle.readline()
matrix = _read_with_pandas(handle, delimiter, nrows)
if matrix is not None:
return matrix
matrix = [
x for x in
read_cols(handle, delimiter=delimiter, skip=0, nrows=nrows)]
return matrix
def _make_format_from_header(names):
import math
import hashlib
# Normalize each name.
normnames = [hashlib.hash_var(x) for x in names]
# For columns with duplicate names, number them so that they are
# unique.
name2count = {}
for n in normnames:
name2count[n] = name2count.get(n, 0) + 1
name2nextid = {}
for i, name in enumerate(normnames):
count = name2count[name]
if count == 1:
continue
# Figure out the number of digits to use for the id.
ndigits = int(math.floor(math.log(count, 10))) + 1
id_ = name2nextid.get(name, 0)
name2nextid[name] = id_ + 1
x = "%s_%0*d" % (name, ndigits, id_)
normnames[i] = x
# Make each of the columns a string.
normnames = ["%s:s" % x for x in normnames]
# Return the format.
return " ".join(normnames)
class RowIterator:
# Member variables:
# _line Previous line from the file (unparsed).
# _cols
# _header Name of each column. From header, format, then index.
# _nheader Normalized names.
def __init__(self, file_or_handle, delimiter, strip, skip, comment_char,
pad_cols, header, format=None, *obj_convert_fns):
if skip:
file_or_handle = openfh(file_or_handle)
for i in range(skip):
file_or_handle.readline()
reader = self._parse_line(file_or_handle, comment_char, delimiter)
names = None
if header:
# If the file is empty, this will raise a StopIteration
# exception.
try:
line, names = reader.next()
except StopIteration:
names = []
# If no format is provided, then make one from the header.
if format is None:
format = _make_format_from_header(names)
assert format is not None, "No format given."
normnames, format = _parse_format(format)
if not names:
names = normnames
x = _make_convert_fns(format, obj_convert_fns, _read_fmt2fn)
convert_fns = x
fn_cols = [
i for (i, fn, f)
in zip(range(len(format)), convert_fns, format)
if fn != None and f != "x"]
x = [i for (i, f) in enumerate(format) if strip and f == "s"]
strip_cols = x
self._header = names
self._nheader = normnames
self._reader = reader
self._format = format
self._convert_fns = convert_fns
self._fn_cols = fn_cols
self._strip_cols = strip_cols
self._pad_cols = pad_cols
def _parse_line(self, file, comment_char, delimiter):
import csv
delimiter = delimiter or "\t"
# Allow up to 32Mb fields (Python 2.5 and above).
if hasattr(csv, "field_size_limit"):
csv.field_size_limit(32*1024*1024)
for i, line in enumerate(openfh(file)):
# Skip blank lines.
if not line.strip():
continue
# Skip comment lines.
if comment_char is not None and line.startswith(comment_char):
continue
try:
x = csv.reader([line], delimiter=delimiter).next()
except csv.Error, x:
raise csv.Error, "%s [%d: %s]" % (str(x), i, repr(line))
yield line, x
def next(self):
line, data = self._reader.next()
cols = data[:]
if len(data) != len(self._format):
dlen, flen = len(data), len(self._format)
if dlen < flen and self._pad_cols is not None:
data = data + [self._pad_cols]*(flen-dlen)
else:
s = "data(%d)/format(%d) are different lengths\n%r\n%r" % (
dlen, flen, self._format, data)
raise AssertionError, s
for i in self._fn_cols:
try:
data[i] = self._convert_fns[i](data[i])
except ValueError, x:
x = "[%s] %s" % (self._header[i], str(x))
raise ValueError(x)
for i in self._strip_cols:
data[i] = data[i].strip()
if self._nheader:
params = {}
for (n, d, f) in zip(self._nheader, data, self._format):
if f == "x":
continue
params[n] = d
data = GenericObject(**params)
self._line = line
self._cols = cols
#data._iter = self
data._line = line
data._cols = cols
data._header = self._header
data._nheader = self._nheader
#setattr(data, "_iter", self)
#setattr(data, "_line", line)
#setattr(data, "_cols", cols)
#setattr(data, "_header", self._header)
#setattr(data, "_nheader", self._nheader)
return data
def __iter__(self):
return self
# read_row(filename, "ssss")
# read_row(filename, "name:s number:i")
# read_row(filename, "col1:O col2:O", convert_fn, convert_fn)
# read_row(filename, header=1)
# delimiter Character used for the delimiter. default "\t".
# strip
# skip Number of lines to skip at the beginning.
# comment_char Ignore lines that start with this character.
# pad_cols If not enough columns in a row, pad with this value.
# header
# format
def read_row(file_or_handle, *args, **keywds):
"""Iterate over each line of a tab-delimited file. The iterator
object contains the following member variables:
_line Previous line from the file (unparsed).
_cols Previous line from the file, parsed as columns.
_header List of column names. From header, format, then index.
_nheader Normalized names.
format is a string describing the type of each column. The format
string can follow one of two syntaxes:
1. "<type1><type2><type3> ..."
2. "<name1>:<type1> <name2>:<type2> ..."
In the first syntax, each character in the string corresponds to
the type of a column. There should be no intervening spaces. The
second syntax also corresponds to columns, but names are also
given.
If names are given, then returns an object with the names used as
member variables. Otherwise, returns a list. Either way, the
returned object will contain special member variables:
_iter reference back to the original iterator
_line
_cols
_header
_nheader
Allowed types are:
s string
i integer
f float
O object
x ignore this column
Each "object" type requires a corresponding function to convert.
If header=1 is given, will use it for the format string. If the
format string is provided, then will just skip the header.
"""
known_params = [
"delimiter", "strip", "skip", "pad_cols", "header", "comment_char"]
for key in keywds:
assert key in known_params, "Unknown parameter: %s" % key
delimiter = keywds.get("delimiter", "\t")
strip = keywds.get("strip", False)
skip = keywds.get("skip", 0)
pad_cols = keywds.get("pad_cols", None)
header = keywds.get("header", False)
comment_char = keywds.get("comment_char", None)
return RowIterator(
file_or_handle, delimiter, strip, skip, comment_char, pad_cols, header,
*args)
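# Hedged usage sketch (not part of the original module).  "samples.txt" and
# its column names are hypothetical; they only illustrate the two format
# syntaxes described in the docstring above.
def _example_read_row(filename="samples.txt"):
    # Named format: each row comes back as an object with member variables.
    for d in read_row(filename, "sample:s dose:f count:i", header=1):
        print d.sample, d.dose, d.count
    # Positional format with header=1: the header row is skipped and each row
    # comes back as a plain list of converted values.
    for x in read_row(filename, "sfi", header=1):
        print x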
def write_row(file_or_handle, data, format=None, *obj_convert_fns):
"""Write one row of a table into file_or_handle. data should be a
list or tuple. It can be a dict, if the format contains names."""
import csv
import operator
if file_or_handle is None:
file_or_handle = sys.stdout
assert type(file_or_handle) is not type(""), "Can not write row to a file."
# If named_format is given, should I accept a dict for data?
names, format = _parse_format(format)
if not operator.isSequenceType(data): # assume it's a TableRow
assert names
data = [getattr(data, n) for n in names]
assert len(data) == len(format), "data/format are different lengths"
convert_fns = _make_convert_fns(format, obj_convert_fns, _write_fmt2fn)
handle = openfh(file_or_handle, 'w')
w = csv.writer(handle, delimiter="\t", lineterminator="\n")
# Should ignore the columns with "x" format, so get rid of it.
data = [fn(d) for (d, f, fn) in zip(data, format, convert_fns) if f != 'x']
w.writerow(data)
#handle.close()
def split_by(delimiter, convert_fn=None):
def f(s):
x = s.split(delimiter)
if convert_fn is not None:
x = [convert_fn(x) for x in x]
return x
return f
def join_by(delimiter, convert_fn=None):
def f(data):
if convert_fn is not None:
data = [convert_fn(x) for x in data]
return delimiter.join(data)
return f
def iter_by(iterator, *args, **params):
assert args, "no fieldnames provided"
VALID_PARAMS = ["BATCH_SIZE"]
for k in params:
assert k in VALID_PARAMS
BATCH_SIZE = params.get("BATCH_SIZE", None)
started = 0
prev_key, data = None, []
for d in iterator:
key = [getattr(d, fn) for fn in args]
if (not started or key != prev_key or
(BATCH_SIZE is not None and len(data) >= BATCH_SIZE)):
if data:
yield data
started = 1
prev_key, data = key, []
data.append(d)
if data:
yield data
def as_dict(iterator, *args, **keywds):
# args is the member variables that comprise the key.
#
# Keyword arguments:
# unique Save the values as an object (not a list).
# use_first Only save the first value for each key.
unique = keywds.get("unique", 1)
use_first = keywds.get("use_first", 0)
dict_ = {}
for d in iterator:
key = tuple([getattr(d, fn) for fn in args])
if len(args) == 1:
key = key[0]
if not unique:
dict_.setdefault(key, []).append(d)
elif not (use_first and key in dict_):
dict_[key] = d
return dict_
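# Hedged sketch of composing read_row with iter_by and as_dict (not part of
# the original module).  "variants.txt" and its column names are hypothetical.
def _example_group_and_index(filename="variants.txt"):
    # Group consecutive rows that share the same chromosome.
    for rows in iter_by(read_row(filename, header=1), "chrom"):
        print rows[0].chrom, len(rows)
    # Index rows by (chrom, pos), keeping only the first record for each key.
    return as_dict(read_row(filename, header=1), "chrom", "pos", use_first=1)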
def safe_unlink(filename):
if not filename or not os.path.exists(filename):
return
os.unlink(filename)
def safe_mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
def list_files_in_path(
file_or_path, endswith=None, file_not_startswith=None,
case_insensitive=False, not_empty=False, toplevel_only=False):
# Return a list of the files. Returns full paths.
# not_empty means will make sure some files are found.
assert os.path.exists(file_or_path), "Not found: %s" % file_or_path
# If file_or_path is a file, then put it in a list.
if not os.path.isdir(file_or_path):
x = os.path.realpath(file_or_path)
filenames = [x]
elif toplevel_only:
x = os.listdir(file_or_path)
filenames = [os.path.join(file_or_path, x) for x in x]
else:
filenames = []
for x in os.walk(file_or_path, followlinks=True):
dirpath, dirnames, files = x
x = [os.path.join(dirpath, x) for x in files]
filenames.extend(x)
if endswith is not None:
x = filenames
if case_insensitive:
x = [x for x in x if x.lower().endswith(endswith.lower())]
else:
x = [x for x in x if x.endswith(endswith)]
filenames = x
if file_not_startswith is not None:
i = 0
while i < len(filenames):
p, f = os.path.split(filenames[i])
nsw = file_not_startswith
if case_insensitive:
f = f.lower()
nsw = file_not_startswith.lower()
if f.startswith(nsw):
del filenames[i]
else:
i += 1
if not_empty:
msg = "No files found."
if endswith:
msg = "No %s files found." % endswith
assert filenames, msg
return filenames
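# Hedged usage sketch (directory and extension are illustrative only):
#
#   filenames = list_files_in_path(
#       "/data/run1", endswith=".fastq", case_insensitive=True, not_empty=True)
#
# This returns full paths to every .fastq/.FASTQ file under /data/run1 and
# raises an AssertionError if none are found.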
def get_file_or_path_size(file_or_path):
import stat
filenames = list_files_in_path(file_or_path)
sizes = [os.stat(x)[stat.ST_SIZE] for x in filenames]
return sum(sizes)
def symlink_file_or_path_to_path(
in_file_or_path, out_path, overwrite_outpath=True):
# in_file_or_path can be a file or a path.
# <file> -> <out_path>/<file>
# <path>/<file> -> <out_path>/<file> Symlink the files under <path>.
#
# If overwrite_outpath is True, then will remove out_path if it
# already exists. Otherwise, will merge with the existing contents.
import shutil
assert os.path.exists(in_file_or_path)
# If in_file_or_path is a symlink, then symlink to the original
# file.
in_file_or_path = os.path.realpath(in_file_or_path)
if overwrite_outpath and os.path.exists(out_path):
if os.path.isdir(out_path):
shutil.rmtree(out_path)
else:
os.unlink(out_path)
if not os.path.exists(out_path):
os.mkdir(out_path)
if os.path.isfile(in_file_or_path): # follows symbolic links
p, f = os.path.split(in_file_or_path)
out_filename = os.path.join(out_path, f)
if not os.path.exists(out_filename):
os.symlink(in_file_or_path, out_filename)
elif os.path.isdir(in_file_or_path):
files = os.listdir(in_file_or_path)
for x in files:
in_filename = os.path.join(in_file_or_path, x)
out_filename = os.path.join(out_path, x)
if not os.path.exists(out_filename):
os.symlink(in_filename, out_filename)
else:
raise AssertionError, "not file or path"
def copy_file_or_path_to_path(in_file_or_path, out_path):
# in_file_or_path can be a file.
# <file> -> <out_path>/<file>
# <path>/<file> -> <out_path>/<file>
import shutil
assert os.path.exists(in_file_or_path)
if os.path.exists(out_path):
        if os.path.isdir(out_path):
shutil.rmtree(out_path)
else:
os.unlink(out_path)
if os.path.isfile(in_file_or_path): # follows symbolic links
os.mkdir(out_path)
shutil.copy2(in_file_or_path, out_path)
elif os.path.isdir(in_file_or_path):
shutil.copytree(in_file_or_path, out_path)
else:
raise AssertionError, "not file or path"
class DelFile:
def __init__(self, filename, mode):
self.filename = filename
self.handle = open(filename, mode)
def __getattr__(self, attr):
return getattr(self.handle, attr)
def __del__(self):
if not self.handle.closed:
self.handle.close()
safe_unlink(self.filename)
def make_temp_handle(suffix="", prefix="", dir=None, realfile=True):
from StringIO import StringIO
import tempfile
if realfile:
x, filename = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
handle = DelFile(filename, "r+w")
else:
handle = StringIO()
return handle
| mit |
geomf/omf-fork | omf/calibrate.py | 1 | 6561 | # Portions Copyright (C) 2015 Intel Corporation
import csv
import datetime as dt
import json
import tempfile
from matplotlib import pyplot as plt
from os.path import join as pJoin
import logging
# OMF imports
import omf.feeder
from omf.solvers import gridlabd
from omf.common.plot import Plot
logger = logging.getLogger(__name__)
def omfCalibrate(workDir, feederPath, scadaPath):
'''calibrates a feeder and saves the calibrated tree at a location'''
logger.info('Calibrating feeder... work dir: %s; feeder path: %s; scada path: %s', workDir, feederPath, scadaPath)
with open(feederPath, "r") as jsonIn:
feederJson = json.load(jsonIn)
tree = feederJson.get("tree", {})
scadaSubPower, firstDateTime = _processScadaData(workDir, scadaPath)
# Force FBS powerflow, because NR fails a lot.
for key in tree:
if tree[key].get("module", "").lower() == "powerflow":
tree[key] = {"module": "powerflow", "solver_method": "FBS"}
# Attach player.
classOb = {"class": "player", "variable_names": [
"value"], "variable_types": ["double"]}
playerOb = {"object": "player", "property": "value",
"name": "scadaLoads", "file": "subScada.player", "loop": "0"}
maxKey = omf.feeder.getMaxKey(tree)
tree[maxKey + 1] = classOb
tree[maxKey + 2] = playerOb
# Make loads reference player.
loadTemplate = {"object": "triplex_load",
"power_pf_12": "0.95",
"impedance_pf_12": "0.98",
"power_pf_12": "0.90",
"impedance_fraction_12": "0.7",
"power_fraction_12": "0.3"}
for key in tree:
ob = tree[key]
if ob.get("object", "") == "triplex_node" and ob.get("power_12", "") != "":
newOb = dict(loadTemplate)
newOb["name"] = ob.get("name", "")
newOb["parent"] = ob.get("parent", "")
newOb["phases"] = ob.get("phases", "")
newOb["nominal_voltage"] = ob.get("nominal_voltage", "")
newOb["latitude"] = ob.get("latitude", "0")
newOb["longitude"] = ob.get("longitude", "0")
oldPow = ob.get("power_12", "").replace("j", "d")
pythagPower = gridlabd._strClean(oldPow)
newOb["base_power_12"] = "scadaLoads.value*" + str(pythagPower)
tree[key] = newOb
# Search for the substation regulator and attach a recorder there.
for key in tree:
if tree[key].get('bustype', '').lower() == 'swing':
swingName = tree[key].get('name')
for key in tree:
if tree[key].get('object', '') in ['regulator', 'overhead_line', 'underground_line', 'transformer', 'fuse'] and tree[key].get('from', '') == swingName:
SUB_REG_NAME = tree[key]['name']
recOb = {"object": "recorder",
"parent": SUB_REG_NAME,
"property": "power_in.real,power_in.imag",
"file": "caliSub.csv",
"interval": "900"}
tree[maxKey + 3] = recOb
HOURS = 100
omf.feeder.adjustTime(tree, HOURS, "hours", firstDateTime.strftime("%Y-%m-%d"))
# Run Gridlabd.
output = gridlabd.runInFilesystem(tree, keepFiles=True, workDir=workDir)
# Calculate scaling constant.
outRealPow = output["caliSub.csv"]["power_in.real"]
outImagPower = output["caliSub.csv"]["power_in.imag"]
outAppPowerKw = [
(x[0]**2 + x[1]**2)**0.5 / 1000 for x in zip(outRealPow, outImagPower)]
# HACK: ignore first time step in output and input because GLD sometimes
# breaks the first step.
SCAL_CONST = sum(scadaSubPower[1:HOURS]) / sum(outAppPowerKw[1:HOURS])
# Rewrite the subScada.player file so all the power values are multiplied
# by the SCAL_CONSTANT.
newPlayData = []
with open(pJoin(workDir, "subScada.player"), "r") as playerFile:
for line in playerFile:
(key, val) = line.split(',')
newPlayData.append(
str(key) + ',' + str(float(val) * SCAL_CONST) + "\n")
with open(pJoin(workDir, "subScadaCalibrated.player"), "w") as playerFile:
for row in newPlayData:
playerFile.write(row)
# Test by running a glm with subScadaCalibrated.player and caliSub.csv2.
tree[maxKey + 2]["file"] = "subScadaCalibrated.player"
tree[maxKey + 3]["file"] = "caliSubCheck.csv"
secondOutput = gridlabd.runInFilesystem(
tree, keepFiles=True, workDir=workDir)
plt.figure()
plt.plot(outAppPowerKw[1:HOURS], label="initialGuess")
plt.plot(scadaSubPower[1:HOURS], label="scadaSubPower")
secondAppKw = [(x[0]**2 + x[1]**2)**0.5 / 1000
for x in zip(secondOutput["caliSubCheck.csv"]["power_in.real"], secondOutput["caliSubCheck.csv"]["power_in.imag"])]
plt.plot(secondAppKw[1:HOURS], label="finalGuess")
plt.legend(loc=3)
Plot.save_fig(plt, pJoin(workDir, "caliCheckPlot.png"))
# Write the final output.
with open(pJoin(workDir, "calibratedFeeder.json"), "w") as outJson:
playerString = open(pJoin(workDir, "subScadaCalibrated.player")).read()
feederJson["attachments"]["subScadaCalibrated.player"] = playerString
feederJson["tree"] = tree
json.dump(feederJson, outJson, indent=4)
return
def _processScadaData(workDir, scadaPath):
'''generate a SCADA player file from raw SCADA data'''
with open(scadaPath, "r") as scadaFile:
scadaReader = csv.DictReader(scadaFile, delimiter='\t')
allData = [row for row in scadaReader]
scadaSubPower = [float(row["power"]) for row in allData]
firstDateTime = dt.datetime.strptime(
allData[1]["timestamp"], "%m/%d/%Y %H:%M:%S")
# Write the player.
maxPower = max(scadaSubPower)
with open(pJoin(workDir, "subScada.player"), "w") as playFile:
for row in allData:
timestamp = dt.datetime.strptime(
row["timestamp"], "%m/%d/%Y %H:%M:%S")
power = float(row["power"]) / maxPower
line = timestamp.strftime(
"%Y-%m-%d %H:%M:%S") + " PST," + str(power) + "\n"
playFile.write(line)
return scadaSubPower, firstDateTime
def _tests():
print "Beginning to test calibrate.py"
workDir = tempfile.mkdtemp()
print "Currently working in: ", workDir
scadaPath = pJoin("uploads", "FrankScada.tsv")
feederPath = pJoin("data", "Feeder", "public", "ABEC Frank LO.json")
assert None == omfCalibrate(
workDir, feederPath, scadaPath), "feeder calibration failed"
if __name__ == '__main__':
_tests()
| gpl-2.0 |
daemonmaker/pylearn2 | pylearn2/train_extensions/tests/test_roc_auc.py | 32 | 5780 | """
Tests for ROC AUC.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_roc_auc():
"""Test RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_roc_auc_one_vs_rest():
"""Test one vs. rest RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_ovr)
trainer.main_loop()
def test_roc_auc_one_vs_one():
"""Test one vs. rest RocAucChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_ovo)
trainer.main_loop()
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 2,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 2,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_roc_auc,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {},
],
}
"""
test_yaml_ovr = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 3,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_roc_auc-0vX,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0vX,
positive_class_index: 0,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-1vX,
positive_class_index: 1,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-2vX,
positive_class_index: 2,
},
],
}
"""
test_yaml_ovo = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
num_classes: 3,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: y,
n_classes: 3,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_roc_auc-0v1,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0v1,
positive_class_index: 0,
negative_class_index: 1,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-0v2,
positive_class_index: 0,
negative_class_index: 2,
},
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {
channel_name_suffix: roc_auc-1v2,
positive_class_index: 1,
negative_class_index: 2,
},
],
}
"""
| bsd-3-clause |
dan-silver/machine-learning-visualizer | visualize_d_tree_generator/convert_tree_to_json.py | 1 | 1822 | from __future__ import division
import sklearn.tree
from sklearn import tree
import json
from math import log10, floor
def round_sig(x, sig=4):
return round(x, sig-int(floor(log10(x)))-1)
class ConvertTreeToJSON(object):
def __init__(self, tree, labels):
self.dt = tree
self.labels = labels
self.t = tree.tree_
def print_tree(self, side, root=0):
left_child = self.t.children_left[root]
right_child = self.t.children_right[root]
        dataPercentage = round_sig(self.t.n_node_samples[root] / self.dt.tree_.n_node_samples[0] * 100) # this node's count / the root node's count
if left_child == sklearn.tree._tree.TREE_LEAF:
return {
'name': 'leaf',
'side': side,
'count': self.t.n_node_samples[root],
'dataPercentage': dataPercentage
}
else:
left_child = self.print_tree('left', root=left_child)
right_child = self.print_tree('right', root=right_child)
return {
'side': side,
'feature': self.labels[self.t.feature[root]],
'featureIdx': self.t.feature[root],
'threshold': self.t.threshold[root],
'count': self.t.n_node_samples[root],
'impurity': round_sig(self.t.impurity[root]),
'children': [left_child, right_child],
'dataPercentage': dataPercentage
}
def convert(self):
nodes = self.print_tree('top')
# hack to generate png from python
# import os
# tree.export_graphviz(dt, out_file='tree.dot')
# os.system("dot -Tpng tree.dot -o tree.png")
return json.dumps(nodes) # Add indent=4, separators=(',', ': ') for human readable version | mit |
rahuldhote/scikit-learn | sklearn/tree/tests/test_tree.py | 57 | 47417 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
splitter="presort-best"),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
splitter="presort-best"),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
if Tree().splitter in SPARSE_SPLITTERS]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = clf.transform(X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth here.
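    # Worked check of the gini/mse equivalence noted above (an editorial
    # addition, not in the original test): for 0/1 labels with a fraction p of
    # ones, the gini impurity is 1 - p**2 - (1 - p)**2 = 2 * p * (1 - p), while
    # the variance used by the mse criterion is p * (1 - p); the two criteria
    # are proportional, so they rank candidate splits identically.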
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [-2, -1, 1] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    # Check that tree estimators are picklable
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
serialized_object = pickle.dumps(clf)
clf2 = pickle.loads(serialized_object)
assert_equal(type(clf2), clf.__class__)
score2 = clf2.score(iris.data, iris.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (classification) "
"with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
serialized_object = pickle.dumps(reg)
reg2 = pickle.loads(serialized_object)
assert_equal(type(reg2), reg.__class__)
score2 = reg2.score(boston.data, boston.target)
assert_equal(score, score2, "Failed to generate same score "
"after pickling (regression) "
"with {0}".format(name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = _balance_weights(unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 200)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-2 <= value.flat[0] < 2,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error message for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._tree import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample the larger datasets to save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
# Due to numerical instability of MSE and too strict test, we limit the
# maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # n_samples is set equal to n_features to ease the simultaneous
    # construction of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, X)
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if TreeEstimator().splitter in SPARSE_SPLITTERS:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
| bsd-3-clause |
smharper/openmc | tests/regression_tests/mgxs_library_nuclides/test.py | 4 | 1925 | import hashlib
import openmc
import openmc.mgxs
from openmc.examples import pwr_pin_cell
from tests.testing_harness import PyAPITestHarness
class MGXSTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._model.geometry)
self.mgxs_lib.by_nuclide = True
# Test all MGXS types
self.mgxs_lib.mgxs_types = openmc.mgxs.MGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Add tallies
self.mgxs_lib.add_to_tallies_file(self._model.tallies, merge=False)
def _get_results(self, hash_output=True):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_mgxs_library_nuclides():
model = pwr_pin_cell()
harness = MGXSTestHarness('statepoint.10.h5', model)
harness.main()
| mit |
JeffreyFish/DocWebTool | Rename.py | 2 | 5793 | #!/usr/bin/env python
# -*- Coding: UTF-8 -*-
#------------------------------------
#--Author: Jeffrey Yu
#--CreationDate: 2017/10/24 14:00
#--RevisedDate:
#------------------------------------
import os
import pandas as pd
import datetime
import common
def rename(x, type='umbrella'):
if type == 'umbrella':
dicts = Umbrella_dict
elif type == 'fund1':
dicts = Fund_dict_Abbr_1
elif type == 'fund2':
dicts = Fund_dict_Abbr_2
elif type == 'specialword':
dicts = Special_words
elif type == 'others':
dicts = Others
for each_word in dicts.keys():
if each_word in x:
x = x.replace(each_word, dicts[each_word])
if (type == 'fund1' or type == 'fund2') and len(x) <= 40:
return x
return x
def replace_word_1(x):
if x in Fund_dict_Abbr_1.keys():
x = Fund_dict_Abbr_1[x]
return x
def reverse_list(x):
i = len(x)
new_x = []
while i > 0:
i = i - 1
new_x.append(x[i])
return new_x
def remove_blank(x, blank_type=0):
if ',' in x:
x = x.replace(',',', ')
if '.' in x:
x = x.replace('.','. ')
x = ' '.join(filter(lambda x: x, x.split(' ')))
if blank_type != 0:
x += ' '
return x
def capitalize_word(x):
x = ' '.join(word[0].upper() + word[1:] for word in x.split())
return x
def remove_head(x):
if x[0] == '-':
x = x[1:]
if x[1] == '-':
x = x[2:]
if x[2] == '-':
x = x[3:]
if x[0] == ' ':
x = x[1:]
return x
Umbrella_dict = {}
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Umbrella_dict.txt', 'r', encoding='UTF-8') as r_d:
for line in r_d.readlines():
list1 = line.split('#')
a = str(list1[0].strip('\n'))
b = str(list1[1].strip('\n'))
Umbrella_dict[a] = b
Fund_dict_Abbr_1 = {}
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Fund_dict_Abbr_1.txt', 'r', encoding='UTF-8') as r_d:
if r_d.read() == '':
Fund_dict_Abbr_1 = {"":""}
else:
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Fund_dict_Abbr_1.txt', 'r', encoding='UTF-8') as r_d:
for line in r_d.readlines():
list1 = line.split('#')
a = str(list1[0].strip('\n'))
b = str(list1[1].strip('\n'))
Fund_dict_Abbr_1[a] = b
Fund_dict_Abbr_2 = {}
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Fund_dict_Abbr_2.txt', 'r', encoding='UTF-8') as r_d:
if r_d.read() == '':
Fund_dict_Abbr_2 = {"":""}
else:
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Fund_dict_Abbr_2.txt', 'r', encoding='UTF-8') as r_d:
for line in r_d.readlines():
list1 = line.split('#')
a = str(list1[0].strip('\n'))
b = str(list1[1].strip('\n'))
Fund_dict_Abbr_2[a] = b
Special_words = {}
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Special_words.txt', 'r', encoding='UTF-8') as r_d:
for line in r_d.readlines():
list1 = line.split('#')
a = str(list1[0].strip('\n'))
b = str(list1[1].strip('\n'))
Special_words[a] = b
Others = {}
with open(common.cur_file_dir() + '\\static\\Rename_Tool_dicts\\Others.txt', 'r', encoding='UTF-8') as r_d:
for line in r_d.readlines():
list1 = line.split('#')
a = str(list1[0].strip('\n'))
b = str(list1[1].strip('\n'))
Others[a] = b
def run(x):
fundlist = []
xx = x.split('\n')
for fundname in xx:
fundname = str(fundname.strip('\n'))
fundname = str(fundname.strip('\r'))
fundlist.append(fundname)
result_file = 'Rename_Result-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
result_file_path = common.temp_path
all_fund = []
for fund in fundlist:
fund_detail = []
old_name = fund
# fund = capitalize_word(remove_blank(fund, blank_type=0))
if ' - ' in fund:
symbol_location = fund.find(' - ') + 1
before_fund = remove_blank(fund[0:symbol_location], blank_type=0)
after_fund = remove_blank(capitalize_word(fund[symbol_location + 1:]), blank_type=0)
before_fund = rename(before_fund, type='specialword')
before_fund = rename(before_fund, type='umbrella')
after_fund = rename(after_fund, type='specialword')
after_fund = rename(after_fund, type='fund1')
after_word_list = after_fund.split(' ')
new_after_fund_list = []
for each_word in reverse_list(after_word_list):
each_word = replace_word_1(each_word)
new_after_fund_list.append(each_word)
after_fund = ' '.join(reverse_list(new_after_fund_list))
after_fund = rename(after_fund, type='others')
after_fund = rename(after_fund, type='fund1')
fund = before_fund + ' - ' + after_fund
fund = remove_blank(fund, blank_type=0)
if len(fund) > 40:
fund = rename(fund, type='fund1')
fund = remove_blank(fund, blank_type=0)
if len(fund) > 40:
fund = rename(fund, type='fund2')
fund = fund.replace(' - ',' ')
else:
fund = rename(fund, type='specialword')
fund = rename(fund, type='others')
fund = rename(fund, type='fund1')
fund_list = fund.split(' ')
new_fund_list = []
for each_word in reverse_list(fund_list):
each_word = replace_word_1(each_word)
new_fund_list.append(each_word)
fund = ' '.join(reverse_list(new_fund_list))
fund = rename(fund, type='fund1')
fund = remove_blank(fund,blank_type=0)
if len(fund) > 40:
fund = rename(fund, type='fund2')
fund = remove_blank(remove_head(fund), blank_type=0)
new_name = fund
name_num = len(new_name)
fund_detail = [old_name, new_name, name_num]
all_fund.append(fund_detail)
df = pd.DataFrame(all_fund, columns=['Old Name', 'New Name', 'New Name Length'])
try:
if os.path.isfile(result_file_path + result_file):
os.remove(result_file_path + result_file)
df.to_csv(result_file_path + result_file, encoding='GB18030')
except:
if os.path.isfile(result_file_path + result_file):
os.remove(result_file_path + result_file)
df.to_csv(result_file_path + result_file, encoding='UTF-8')
return result_file
| gpl-3.0 |
rajat1994/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameter can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
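# For reference (an editorial addition, not part of the original example): the
# RBF kernel used by SVC is K(x, x') = exp(-gamma * ||x - x'||^2), which is why
# a large ``gamma`` shrinks the region in which a single support vector
# influences the decision function.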
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility class to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
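# Usage note (an editorial addition): with MidpointNormalize(vmin=0.2,
# midpoint=0.92), a score of 0.92 is mapped to the middle of the colormap
# (0.5), which is what makes the best-performing region stand out in the
# heatmap drawn at the end of this script.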
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
aerospaceresearch/orbitdeterminator | orbitdeterminator/main.py | 1 | 20077 | '''
Runs the whole process in one file for a .csv positional data file (time, x, y, z)
and generates the final set of Keplerian elements along with a plot and a filtered.csv data file
'''
from util import (read_data, kep_state, rkf78, golay_window)
from filters import (sav_golay, triple_moving_average, wiener)
from kep_determination import (lamberts_kalman, interpolation, ellipse_fit, gibbs_method, gauss_method)
from optimization import (with_mcmc)
import argparse
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from propagation import sgp4
import inquirer
from vpython import *
import animate_orbit
import kep_determination.orbital_elements as oe
import random
def get_timestamp_index_by_orbitperiod(semimajor_axis, timestamps):
T_orbitperiod = oe.T_orbitperiod(semimajor_axis=semimajor_axis)
runtime = np.subtract(timestamps, np.min(timestamps))
index = np.argmax(runtime >= T_orbitperiod // 2) - 1 # only half orbit is good for Gibbs method
if index < 2:
        # in case there are not enough points for the index to reach at least 2,
        # or the argmax search finds nothing (all False) so the index falls below 2.
index = len(timestamps) - 1
return index
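# Illustrative note (an editorial addition; numbers are approximate): the
# Keplerian period for a semimajor axis of about 6800 km is roughly 93 minutes,
# so with one position sample per second the index returned above would land
# near the sample taken about 46 minutes into the arc; shorter arcs fall back
# to the last available sample.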
def process(data_file, error_apriori, units):
'''
    Given a .csv data file in the format of (time, x, y, z), applies the selected filter(s), generates a filtered.csv
    data file, prints out the final Keplerian elements computed with the selected determination method(s) and finally
    plots the initial data set, the filtered data set and the final orbit.
    Args:
        data_file (string): The name of the .csv file containing the positional data
        error_apriori (float): apriori estimation of the measurement error in km
        units (string): 'm' if the input positions are in meters (they are converted to km), anything else for km
    Returns:
        Nothing; it runs the whole process of the program and writes the outputs to disk.
'''
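    # Hedged usage sketch (an editorial addition; the error value is only
    # illustrative):
    #   process("example_data/orbit.csv", error_apriori=20.0, units="km")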
# First read the csv file called "orbit" with the positional data
print("Imported file format is:", read_data.detect_file_format(data_file)["file"])
print("")
data = read_data.load_data(data_file)
if(units == 'm'):
# Transform m to km
data[:, 1:4] = data[:, 1:4] / 1000
print("***********Choose filter(s) in desired order of application***********")
print("(SPACE to toggle, UP/DOWN to navigate, RIGHT/LEFT to select/deselect and ENTER to submit)")
print("*if nothing is selected, Triple Moving Average followed by Savitzky Golay will be applied")
questions = [
inquirer.Checkbox('filter',
message="Select filter(s)",
choices=['None', 'Savitzky Golay Filter', 'Triple Moving Average Filter','Wiener Filter'],
),
]
choices = inquirer.prompt(questions)
data_after_filter = data
if(len(choices['filter']) == 0):
print("Applying Triple Moving Average followed by Savitzky Golay...")
# Apply the Triple moving average filter with window = 3
data_after_filter = triple_moving_average.generate_filtered_data(data_after_filter, 3)
# Use the golay_window.py script to find the window for the Savitzky Golay filter based on the error you input
window = golay_window.window(error_apriori, data_after_filter)
polyorder = 3
if polyorder < window:
# Apply the Savitzky Golay filter with window = window (51 for example_data/orbit.csv) and polynomial order = 3
data_after_filter = sav_golay.golay(data_after_filter, window, polyorder)
else:
for index, choice in enumerate(choices['filter']):
if(choice == 'None'):
print("Using the original data...")
# no filter is applied
data_after_filter = data_after_filter
elif (choice == 'Savitzky Golay Filter'):
print("Applying Savitzky Golay Filter...")
# Use the golay_window.py script to find the window for the Savitzky Golay filter
# based on the error you input
window = golay_window.window(error_apriori, data_after_filter)
polyorder = 3
if polyorder < window:
# Apply the Savitzky Golay filter with window = window (51 for example_data/orbit.csv) and polynomial order = 3
data_after_filter = sav_golay.golay(data_after_filter, window, polyorder)
elif(choice == 'Wiener Filter'):
print("Applying Wiener Filter...")
# Apply the Wiener filter
data_after_filter = wiener.wiener_new(data_after_filter, 3)
else:
print("Applying Triple Moving Average Filter...")
# Apply the Triple moving average filter with window = 3
data_after_filter = triple_moving_average.generate_filtered_data(data_after_filter, 3)
# Compute the residuals between filtered data and initial data and then the sum and mean values of each axis
res = data_after_filter[:, 1:4] - data[:, 1:4]
sums = np.sum(res, axis = 0)
print("\nDisplaying the sum of the residuals for each axis")
print(sums, "\n")
means = np.mean(res, axis = 0)
print("Displaying the mean of the residuals for each axis")
print(means, "\n")
# Save the filtered data into a new csv called "filtered"
np.savetxt("filtered.csv", data_after_filter, delimiter = ",")
print("***********Choose Method(s) for Orbit Determination***********")
print("(SPACE to toggle, UP/DOWN to navigate, RIGHT/LEFT to select/deselect and ENTER to submit)")
print("*if nothing is selected, Cubic Spline Interpolation will be used for Orbit Determination")
questions = [
inquirer.Checkbox('method',
message="Select Method(s)",
choices=['Lamberts Kalman',
'Cubic Spline Interpolation',
'Ellipse Best Fit',
'Gibbs 3 Vector',
'Gauss 3 Vector',
'MCMC (exp.)'],
),
]
choices = inquirer.prompt(questions)
kep_elements = {}
if(len(choices['method']) == 0):
# Apply the interpolation method
kep_inter = interpolation.main(data_after_filter)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_inter = lamberts_kalman.kalman(kep_inter, 0.01 ** 2)
kep_final_inter = np.transpose(kep_final_inter)
kep_final_inter = np.resize(kep_final_inter, ((7, 1)))
kep_final_inter[6, 0] = sgp4.rev_per_day(kep_final_inter[0, 0])
kep_elements['Cubic Spline Interpolation'] = kep_final_inter
else:
for index, choice in enumerate(choices['method']):
if(choice == 'Lamberts Kalman'):
# Apply Lambert Kalman method for the filtered data set
#previously, all data...
#kep_lamb = lamberts_kalman.create_kep(data_after_filter)
# only three (3) observations from half an orbit.
# also just two (2) observations are fine for lamberts.
data = np.array([data_after_filter[:, :][0],
data_after_filter[:, :][len(data_after_filter) // 2],
data_after_filter[:, :][-1]])
kep_lamb = lamberts_kalman.create_kep(data)
# Determination of orbit period
semimajor_axis = kep_lamb[0][0]
timestamps = data_after_filter[:, 0]
index = get_timestamp_index_by_orbitperiod(semimajor_axis, timestamps)
# enough data for half orbit
data = np.array([data_after_filter[:, :][0],
data_after_filter[:, :][index // 2],
data_after_filter[:, :][index]])
kep_lamb = lamberts_kalman.create_kep(data)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_lamb = lamberts_kalman.kalman(kep_lamb, 0.01 ** 2)
kep_final_lamb = np.transpose(kep_final_lamb)
kep_final_lamb = np.resize(kep_final_lamb, ((7, 1)))
kep_final_lamb[6, 0] = sgp4.rev_per_day(kep_final_lamb[0, 0])
kep_elements['Lamberts Kalman'] = kep_final_lamb
elif(choice == 'Cubic Spline Interpolation'):
# Apply the interpolation method
kep_inter = interpolation.main(data_after_filter)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_inter = lamberts_kalman.kalman(kep_inter, 0.01 ** 2)
kep_final_inter = np.transpose(kep_final_inter)
kep_final_inter = np.resize(kep_final_inter, ((7, 1)))
kep_final_inter[6, 0] = sgp4.rev_per_day(kep_final_inter[0, 0])
kep_elements['Cubic Spline Interpolation'] = kep_final_inter
elif(choice == 'Ellipse Best Fit'):
# Apply the ellipse best fit method
kep_ellip = ellipse_fit.determine_kep(data_after_filter[:, 1:])[0]
kep_final_ellip = np.transpose(kep_ellip)
kep_final_ellip = np.resize(kep_final_ellip, ((7, 1)))
kep_final_ellip[6, 0] = sgp4.rev_per_day(kep_final_ellip[0, 0])
kep_elements['Ellipse Best Fit'] = kep_final_ellip
elif (choice == 'Gibbs 3 Vector'):
# Apply the Gibbs method
# first only with first, middle and last measurement
R = np.array([data_after_filter[:, 1:][0],
data_after_filter[:, 1:][len(data_after_filter) // 2],
data_after_filter[:, 1:][-1]])
kep_gibbs = gibbs_method.gibbs_get_kep(R)
# Determination of orbit period
semimajor_axis = kep_gibbs[0][0]
timestamps = data_after_filter[:, 0]
index = get_timestamp_index_by_orbitperiod(semimajor_axis, timestamps)
# enough data for half orbit
R = np.array([data_after_filter[:, 1:][0],
data_after_filter[:, 1:][index // 2],
data_after_filter[:, 1:][index]])
kep_gibbs = gibbs_method.gibbs_get_kep(R)
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_gibbs = lamberts_kalman.kalman(kep_gibbs, 0.01 ** 2)
kep_final_gibbs = np.transpose(kep_final_gibbs)
kep_final_gibbs = np.resize(kep_final_gibbs, ((7, 1)))
kep_final_gibbs[6, 0] = sgp4.rev_per_day(kep_final_gibbs[0, 0])
kep_elements['Gibbs 3 Vector'] = kep_final_gibbs
elif (choice == 'Gauss 3 Vector'):
# Apply the Gauss method
# first only with first, middle and last measurement
R = np.array([data_after_filter[:, 1:][0],
data_after_filter[:, 1:][len(data_after_filter) // 2],
data_after_filter[:, 1:][-1]])
t1 = data_after_filter[:, 0][0]
t2 = data_after_filter[:, 0][len(data_after_filter) // 2]
t3 = data_after_filter[:, 0][-1]
v2 = gauss_method.gauss_method_get_velocity(R[0], R[1], R[2], t1, t2, t3)
# Determination of orbit period
semimajor_axis = oe.semimajor_axis(R[0], v2)
timestamps = data_after_filter[:, 0]
index = get_timestamp_index_by_orbitperiod(semimajor_axis, timestamps)
# enough data for half orbit
R = np.array([data_after_filter[:, 1:][0],
data_after_filter[:, 1:][index // 2],
data_after_filter[:, 1:][index]])
t1 = data_after_filter[:, 0][0]
t2 = data_after_filter[:, 0][index // 2]
t3 = data_after_filter[:, 0][index]
v2 = gauss_method.gauss_method_get_velocity(R[0], R[1], R[2], t1, t2, t3)
semimajor_axis = oe.semimajor_axis(R[0], v2)
ecc = oe.eccentricity_v(R[1], v2)
ecc = np.linalg.norm(ecc)
inc = oe.inclination(R[1], v2) * 180.0 / np.pi
AoP = oe.AoP(R[1], v2) * 180.0 / np.pi
raan = oe.raan(R[1], v2) * 180.0 / np.pi
true_anomaly = oe.true_anomaly(R[1], v2) * 180.0 / np.pi
T_orbitperiod = oe.T_orbitperiod(semimajor_axis=semimajor_axis)
n_mean_motion_perday = oe.n_mean_motion_perday(T_orbitperiod)
kep_gauss = np.array([[semimajor_axis, ecc, inc, AoP, raan, true_anomaly, n_mean_motion_perday]])
# Apply Kalman filters to find the best approximation of the keplerian elements for all solutions
# We set an estimate of measurement variance R = 0.01 ** 2
kep_final_gauss = lamberts_kalman.kalman(kep_gauss, 0.01 ** 2)
kep_final_gauss = np.transpose(kep_final_gauss)
kep_final_gauss = np.resize(kep_final_gauss, ((7, 1)))
kep_final_gauss[6, 0] = sgp4.rev_per_day(kep_final_gauss[0, 0])
kep_elements['Gauss 3 Vector'] = kep_final_gauss
else:
# apply mcmc method, a real optimizer
# all data
timestamps = data_after_filter[:, 0]
R = np.array(data_after_filter[:, 1:])
                # all data can make the MCMC very slow, so we just pick a few at random, but in order.
timestamps_short = []
R_short = []
if len(timestamps) > 25:
print("Too many positions for MCMC. Just 25 positons are selected")
# pick randomly, but in order and no duplicates
l = list(np.linspace(0, len(timestamps) - 1, num=len(timestamps)))
select_index = sorted(random.sample(list(l)[1:-1], k=23))
print(select_index)
timestamps_short.append(timestamps[0])
R_short.append(R[0])
for select in range(len(select_index)):
timestamps_short.append(timestamps[int(select_index[select])])
R_short.append(R[int(select_index[select])])
timestamps_short.append(timestamps[-1])
R_short.append(R[-1])
else:
timestamps_short = timestamps
R_short = R
parameters = with_mcmc.fromposition(timestamps_short, R_short)
r_a = parameters["r_a"]
r_p = parameters["r_p"]
AoP = parameters["AoP"]
inc = parameters["inc"]
raan = parameters["raan"]
tp = parameters["tp"]
semimajor_axis = (r_p + r_a) / 2.0
ecc = (r_a - r_p) / (r_a + r_p)
T_orbitperiod = oe.T_orbitperiod(semimajor_axis=semimajor_axis)
true_anomaly = tp / T_orbitperiod * 360.0
n_mean_motion_perday = oe.n_mean_motion_perday(T_orbitperiod)
kep_mcmc = np.array([[semimajor_axis, ecc, inc, AoP, raan, true_anomaly, n_mean_motion_perday]])
kep_elements['MCMC (exp.)'] = kep_mcmc
kep_final = np.zeros((7, len(kep_elements)))
order = []
for index, key in enumerate(kep_elements):
kep_final[:, index] = np.ravel(kep_elements[key])
order.append(str(key))
# Print the final orbital elements for all solutions
kep_elements = ["Semi major axis (a)(km)", "Eccentricity (e)", "Inclination (i)(deg)",
"Argument of perigee (ω)(deg)", "Right acension of ascending node (Ω)(deg)",
"True anomaly (v)(deg)", "Frequency (f)(rev/day)"]
for i in range(0, len(order)):
print("\n******************Output for %s Method******************\n" % order[i])
for j in range(0, 7):
print("%s: %.16f" % (kep_elements[j], kep_final[j, i]))
print("\nShow plots? [y/n]")
user_input = input()
if(user_input == "y" or user_input == "Y"):
for j in range(0, len(order)):
# Plot the initial data set, the filtered data set and the final orbit
# First we transform the set of keplerian elements into a state vector
state = kep_state.kep_state(np.resize(kep_final[:, j], (7, 1)))
            # Then we produce more state vectors at various times using a Runge-Kutta algorithm
keep_state = np.zeros((6, 150))
ti = 0.0
tf = 1.0
t_hold = np.zeros((150, 1))
x = state
h = 0.1
tetol = 1e-04
for i in range(0, 150):
keep_state[:, i] = np.ravel(rkf78.rkf78(6, ti, tf, h, tetol, x))
t_hold[i, 0] = tf
tf = tf + 1
positions = keep_state[0:3, :]
## Finally we plot the graph
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = plt.axes(projection = '3d')
ax.plot(data[:, 1], data[:, 2], data[:, 3], ".", label = 'Initial data ')
ax.plot(data_after_filter[:, 1], data_after_filter[:, 2], data_after_filter[:, 3], "k", linestyle = '-',
label = 'Filtered data')
ax.plot(positions[0, :], positions[1, :], positions[2, :], "r-", label = 'Orbit after %s method' % order[j])
ax.legend()
ax.can_zoom()
ax.set_xlabel('x (km)')
ax.set_ylabel('y (km)')
ax.set_zlabel('z (km)')
plt.show()
def read_args():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_path', type = str, help = "path to .csv data file", default = 'example_data/orbit.csv')
parser.add_argument('-e', '--error', type = float, help = "estimation of the measurement error", default = 10.0)
parser.add_argument('-u', '--units', type = str, help = "m for metres, k for kilometres", default = 'm')
return parser.parse_args()
if __name__ == "__main__":
print("\n************Welcome To OrbitDeterminator************\n")
print("Workflow for OrbitDeterminator is as follows:")
workflow = " ----------- ----------------------\n"\
"Positional data--->| Filters |--->| Keplerian elements |--->Determined Orbit\n"\
" | | | Determination |\n"\
" ----------- ----------------------\n\n"\
"Available filters: | Available methods for orbit determination:\n"\
" 1. None (original data) | 1. Lamberts Kalman\n"\
" 2. Savitzky Golay Filter | 2. Cubic spline interpolation\n"\
" 4. Triple Moving Average Filter| 3. Ellipse Bset Fit\n"\
" 5. Wiener Filter | 4. Gibbs 3 Vector\n"\
" | 5. Gauss 3 Vector\n"\
" | 6. MCMC (experimental)\n"
print("\n" + workflow)
args = read_args()
process(args.file_path, args.error, args.units)
animate_orbit.animate(args.file_path, 6400) | mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/test_pickle.py | 1 | 11976 | """
manage legacy pickle tests
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
"""
import bz2
import glob
import gzip
import os
import pickle
import shutil
from warnings import catch_warnings, simplefilter
import zipfile
import pytest
from pandas.compat import _get_lzma_file, _import_lzma, is_platform_little_endian
import pandas as pd
from pandas import Index
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, MonthEnd
lzma = _import_lzma()
@pytest.fixture(scope="module")
def current_pickle_data():
# our current version pickle data
from pandas.tests.io.generate_legacy_storage_files import create_pickle_data
return create_pickle_data()
# ---------------------
# comparison functions
# ---------------------
def compare_element(result, expected, typ, version=None):
if isinstance(expected, Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith("sp_"):
comparator = getattr(tm, "assert_{typ}_equal".format(typ=typ))
comparator(result, expected, exact_indices=False)
elif typ == "timestamp":
if expected is pd.NaT:
assert result is pd.NaT
else:
assert result == expected
assert result.freq == expected.freq
else:
comparator = getattr(
tm, "assert_{typ}_equal".format(typ=typ), tm.assert_almost_equal
)
comparator(result, expected)
def compare(data, vf, version):
data = pd.read_pickle(vf)
m = globals()
for typ, dv in data.items():
for dt, result in dv.items():
expected = data[typ][dt]
# use a specific comparator
# if available
comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = m.get(comparator, m["compare_element"])
comparator(result, expected, typ, version)
return data
def compare_sp_series_ts(res, exp, typ, version):
tm.assert_sp_series_equal(res, exp)
def compare_series_ts(result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
assert result.index.freq == expected.index.freq
assert not result.index.freq.normalize
tm.assert_series_equal(result > 0, expected > 0)
# GH 9291
freq = result.index.freq
assert freq + Day(1) == Day(2)
res = freq + pd.Timedelta(hours=1)
assert isinstance(res, pd.Timedelta)
assert res == pd.Timedelta(days=1, hours=1)
res = freq + pd.Timedelta(nanoseconds=1)
assert isinstance(res, pd.Timedelta)
assert res == pd.Timedelta(days=1, nanoseconds=1)
def compare_series_dt_tz(result, expected, typ, version):
tm.assert_series_equal(result, expected)
def compare_series_cat(result, expected, typ, version):
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(result, expected, typ, version):
tm.assert_frame_equal(result, expected)
def compare_frame_cat_onecol(result, expected, typ, version):
tm.assert_frame_equal(result, expected)
def compare_frame_cat_and_float(result, expected, typ, version):
compare_frame_cat_onecol(result, expected, typ, version)
def compare_index_period(result, expected, typ, version):
tm.assert_index_equal(result, expected)
assert isinstance(result.freq, MonthEnd)
assert result.freq == MonthEnd()
assert result.freqstr == "M"
tm.assert_index_equal(result.shift(2), expected.shift(2))
def compare_sp_frame_float(result, expected, typ, version):
tm.assert_sp_frame_equal(result, expected)
files = glob.glob(
os.path.join(os.path.dirname(__file__), "data", "legacy_pickle", "*", "*.pickle")
)
@pytest.fixture(params=files)
def legacy_pickle(request, datapath):
return datapath(request.param)
# ---------------------
# tests
# ---------------------
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_pickles(current_pickle_data, legacy_pickle):
if not is_platform_little_endian():
pytest.skip("known failure on non-little endian")
version = os.path.basename(os.path.dirname(legacy_pickle))
with catch_warnings(record=True):
simplefilter("ignore")
compare(current_pickle_data, legacy_pickle, version)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_round_trip_current(current_pickle_data):
def python_pickler(obj, path):
with open(path, "wb") as fh:
pickle.dump(obj, fh, protocol=-1)
def python_unpickler(path):
with open(path, "rb") as fh:
fh.seek(0)
return pickle.load(fh)
data = current_pickle_data
for typ, dv in data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, python_pickler]:
if writer is None:
continue
with tm.ensure_clean() as path:
# test writing with each pickler
writer(expected, path)
# test reading with each unpickler
result = pd.read_pickle(path)
compare_element(result, expected, typ)
result = python_unpickler(path)
compare_element(result, expected, typ)
def test_pickle_v0_14_1(datapath):
cat = pd.Categorical(
values=["a", "b", "c"], ordered=False, categories=["a", "b", "c", "d"]
)
pickle_path = datapath("io", "data", "categorical_0_14_1.pickle")
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(datapath):
# ordered -> _ordered
# GH 9347
cat = pd.Categorical(
values=["a", "b", "c"], ordered=False, categories=["a", "b", "c", "d"]
)
pickle_path = datapath("io", "data", "categorical_0_15_2.pickle")
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_path_pathlib():
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath():
df = tm.makeDataFrame()
result = tm.round_trip_localpath(df.to_pickle, pd.read_pickle)
tm.assert_frame_equal(df, result)
# ---------------------
# test pickle compression
# ---------------------
@pytest.fixture
def get_random_path():
return "__{}__.pickle".format(tm.rands(10))
class TestCompression:
_compression_to_extension = {
None: ".none",
"gzip": ".gz",
"bz2": ".bz2",
"zip": ".zip",
"xz": ".xz",
}
def compress_file(self, src_path, dest_path, compression):
if compression is None:
shutil.copyfile(src_path, dest_path)
return
if compression == "gzip":
f = gzip.open(dest_path, "w")
elif compression == "bz2":
f = bz2.BZ2File(dest_path, "w")
elif compression == "zip":
with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
f.write(src_path, os.path.basename(src_path))
elif compression == "xz":
f = _get_lzma_file(lzma)(dest_path, "w")
else:
msg = "Unrecognized compression type: {}".format(compression)
raise ValueError(msg)
if compression != "zip":
with open(src_path, "rb") as fh, f:
f.write(fh.read())
def test_write_explicit(self, compression, get_random_path):
base = get_random_path
path1 = base + ".compressed"
path2 = base + ".raw"
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
df = tm.makeDataFrame()
# write to compressed file
df.to_pickle(p1, compression=compression)
# decompress
with tm.decompress_file(p1, compression=compression) as f:
with open(p2, "wb") as fh:
fh.write(f.read())
# read decompressed file
df2 = pd.read_pickle(p2, compression=None)
tm.assert_frame_equal(df, df2)
@pytest.mark.parametrize("compression", ["", "None", "bad", "7z"])
def test_write_explicit_bad(self, compression, get_random_path):
with pytest.raises(ValueError, match="Unrecognized compression type"):
with tm.ensure_clean(get_random_path) as path:
df = tm.makeDataFrame()
df.to_pickle(path, compression=compression)
@pytest.mark.parametrize("ext", ["", ".gz", ".bz2", ".no_compress", ".xz"])
def test_write_infer(self, ext, get_random_path):
base = get_random_path
path1 = base + ext
path2 = base + ".raw"
compression = None
for c in self._compression_to_extension:
if self._compression_to_extension[c] == ext:
compression = c
break
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
df = tm.makeDataFrame()
# write to compressed file by inferred compression method
df.to_pickle(p1)
# decompress
with tm.decompress_file(p1, compression=compression) as f:
with open(p2, "wb") as fh:
fh.write(f.read())
# read decompressed file
df2 = pd.read_pickle(p2, compression=None)
tm.assert_frame_equal(df, df2)
def test_read_explicit(self, compression, get_random_path):
base = get_random_path
path1 = base + ".raw"
path2 = base + ".compressed"
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
df = tm.makeDataFrame()
# write to uncompressed file
df.to_pickle(p1, compression=None)
# compress
self.compress_file(p1, p2, compression=compression)
# read compressed file
df2 = pd.read_pickle(p2, compression=compression)
tm.assert_frame_equal(df, df2)
@pytest.mark.parametrize("ext", ["", ".gz", ".bz2", ".zip", ".no_compress", ".xz"])
def test_read_infer(self, ext, get_random_path):
base = get_random_path
path1 = base + ".raw"
path2 = base + ext
compression = None
for c in self._compression_to_extension:
if self._compression_to_extension[c] == ext:
compression = c
break
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
df = tm.makeDataFrame()
# write to uncompressed file
df.to_pickle(p1, compression=None)
# compress
self.compress_file(p1, p2, compression=compression)
# read compressed file by inferred compression method
df2 = pd.read_pickle(p2)
tm.assert_frame_equal(df, df2)
# ---------------------
# test pickle compression
# ---------------------
class TestProtocol:
@pytest.mark.parametrize("protocol", [-1, 0, 1, 2])
def test_read(self, protocol, get_random_path):
with tm.ensure_clean(get_random_path) as path:
df = tm.makeDataFrame()
df.to_pickle(path, protocol=protocol)
df2 = pd.read_pickle(path)
tm.assert_frame_equal(df, df2)
| apache-2.0 |
marmarko/ml101 | tensorflow/examples/skflow/hdf5_classification.py | 9 | 1992 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, h5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from tensorflow.contrib import learn
import h5py # pylint: disable=g-bad-import-order
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Note that we are saving and loading iris data in h5 format as a simple
# demonstration here.
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
x_train = h5f['X_train']
x_test = h5f['X_test']
y_train = h5f['y_train']
y_test = h5f['y_test']
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
| bsd-2-clause |
almarklein/bokeh | examples/glyphs/colors.py | 1 | 8544 | from __future__ import print_function
from math import pi
import pandas as pd
from bokeh.models import Plot, ColumnDataSource, FactorRange, CategoricalAxis
from bokeh.models.glyphs import Rect
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
css3_colors = pd.DataFrame([
("Pink", "#FFC0CB", "Pink"),
("LightPink", "#FFB6C1", "Pink"),
("HotPink", "#FF69B4", "Pink"),
("DeepPink", "#FF1493", "Pink"),
("PaleVioletRed", "#DB7093", "Pink"),
("MediumVioletRed", "#C71585", "Pink"),
("LightSalmon", "#FFA07A", "Red"),
("Salmon", "#FA8072", "Red"),
("DarkSalmon", "#E9967A", "Red"),
("LightCoral", "#F08080", "Red"),
("IndianRed", "#CD5C5C", "Red"),
("Crimson", "#DC143C", "Red"),
("FireBrick", "#B22222", "Red"),
("DarkRed", "#8B0000", "Red"),
("Red", "#FF0000", "Red"),
("OrangeRed", "#FF4500", "Orange"),
("Tomato", "#FF6347", "Orange"),
("Coral", "#FF7F50", "Orange"),
("DarkOrange", "#FF8C00", "Orange"),
("Orange", "#FFA500", "Orange"),
("Yellow", "#FFFF00", "Yellow"),
("LightYellow", "#FFFFE0", "Yellow"),
("LemonChiffon", "#FFFACD", "Yellow"),
("LightGoldenrodYellow", "#FAFAD2", "Yellow"),
("PapayaWhip", "#FFEFD5", "Yellow"),
("Moccasin", "#FFE4B5", "Yellow"),
("PeachPuff", "#FFDAB9", "Yellow"),
("PaleGoldenrod", "#EEE8AA", "Yellow"),
("Khaki", "#F0E68C", "Yellow"),
("DarkKhaki", "#BDB76B", "Yellow"),
("Gold", "#FFD700", "Yellow"),
("Cornsilk", "#FFF8DC", "Brown"),
("BlanchedAlmond", "#FFEBCD", "Brown"),
("Bisque", "#FFE4C4", "Brown"),
("NavajoWhite", "#FFDEAD", "Brown"),
("Wheat", "#F5DEB3", "Brown"),
("BurlyWood", "#DEB887", "Brown"),
("Tan", "#D2B48C", "Brown"),
("RosyBrown", "#BC8F8F", "Brown"),
("SandyBrown", "#F4A460", "Brown"),
("Goldenrod", "#DAA520", "Brown"),
("DarkGoldenrod", "#B8860B", "Brown"),
("Peru", "#CD853F", "Brown"),
("Chocolate", "#D2691E", "Brown"),
("SaddleBrown", "#8B4513", "Brown"),
("Sienna", "#A0522D", "Brown"),
("Brown", "#A52A2A", "Brown"),
("Maroon", "#800000", "Brown"),
("DarkOliveGreen", "#556B2F", "Green"),
("Olive", "#808000", "Green"),
("OliveDrab", "#6B8E23", "Green"),
("YellowGreen", "#9ACD32", "Green"),
("LimeGreen", "#32CD32", "Green"),
("Lime", "#00FF00", "Green"),
("LawnGreen", "#7CFC00", "Green"),
("Chartreuse", "#7FFF00", "Green"),
("GreenYellow", "#ADFF2F", "Green"),
("SpringGreen", "#00FF7F", "Green"),
("MediumSpringGreen", "#00FA9A", "Green"),
("LightGreen", "#90EE90", "Green"),
("PaleGreen", "#98FB98", "Green"),
("DarkSeaGreen", "#8FBC8F", "Green"),
("MediumSeaGreen", "#3CB371", "Green"),
("SeaGreen", "#2E8B57", "Green"),
("ForestGreen", "#228B22", "Green"),
("Green", "#008000", "Green"),
("DarkGreen", "#006400", "Green"),
("MediumAquamarine", "#66CDAA", "Cyan"),
("Aqua", "#00FFFF", "Cyan"),
("Cyan", "#00FFFF", "Cyan"),
("LightCyan", "#E0FFFF", "Cyan"),
("PaleTurquoise", "#AFEEEE", "Cyan"),
("Aquamarine", "#7FFFD4", "Cyan"),
("Turquoise", "#40E0D0", "Cyan"),
("MediumTurquoise", "#48D1CC", "Cyan"),
("DarkTurquoise", "#00CED1", "Cyan"),
("LightSeaGreen", "#20B2AA", "Cyan"),
("CadetBlue", "#5F9EA0", "Cyan"),
("DarkCyan", "#008B8B", "Cyan"),
("Teal", "#008080", "Cyan"),
("LightSteelBlue", "#B0C4DE", "Blue"),
("PowderBlue", "#B0E0E6", "Blue"),
("LightBlue", "#ADD8E6", "Blue"),
("SkyBlue", "#87CEEB", "Blue"),
("LightSkyBlue", "#87CEFA", "Blue"),
("DeepSkyBlue", "#00BFFF", "Blue"),
("DodgerBlue", "#1E90FF", "Blue"),
("CornflowerBlue", "#6495ED", "Blue"),
("SteelBlue", "#4682B4", "Blue"),
("RoyalBlue", "#4169E1", "Blue"),
("Blue", "#0000FF", "Blue"),
("MediumBlue", "#0000CD", "Blue"),
("DarkBlue", "#00008B", "Blue"),
("Navy", "#000080", "Blue"),
("MidnightBlue", "#191970", "Blue"),
("Lavender", "#E6E6FA", "Purple"),
("Thistle", "#D8BFD8", "Purple"),
("Plum", "#DDA0DD", "Purple"),
("Violet", "#EE82EE", "Purple"),
("Orchid", "#DA70D6", "Purple"),
("Fuchsia", "#FF00FF", "Purple"),
("Magenta", "#FF00FF", "Purple"),
("MediumOrchid", "#BA55D3", "Purple"),
("MediumPurple", "#9370DB", "Purple"),
("BlueViolet", "#8A2BE2", "Purple"),
("DarkViolet", "#9400D3", "Purple"),
("DarkOrchid", "#9932CC", "Purple"),
("DarkMagenta", "#8B008B", "Purple"),
("Purple", "#800080", "Purple"),
("Indigo", "#4B0082", "Purple"),
("DarkSlateBlue", "#483D8B", "Purple"),
("SlateBlue", "#6A5ACD", "Purple"),
("MediumSlateBlue", "#7B68EE", "Purple"),
("White", "#FFFFFF", "White"),
("Snow", "#FFFAFA", "White"),
("Honeydew", "#F0FFF0", "White"),
("MintCream", "#F5FFFA", "White"),
("Azure", "#F0FFFF", "White"),
("AliceBlue", "#F0F8FF", "White"),
("GhostWhite", "#F8F8FF", "White"),
("WhiteSmoke", "#F5F5F5", "White"),
("Seashell", "#FFF5EE", "White"),
("Beige", "#F5F5DC", "White"),
("OldLace", "#FDF5E6", "White"),
("FloralWhite", "#FFFAF0", "White"),
("Ivory", "#FFFFF0", "White"),
("AntiqueWhite", "#FAEBD7", "White"),
("Linen", "#FAF0E6", "White"),
("LavenderBlush", "#FFF0F5", "White"),
("MistyRose", "#FFE4E1", "White"),
("Gainsboro", "#DCDCDC", "Gray/Black"),
("LightGray", "#D3D3D3", "Gray/Black"),
("Silver", "#C0C0C0", "Gray/Black"),
("DarkGray", "#A9A9A9", "Gray/Black"),
("Gray", "#808080", "Gray/Black"),
("DimGray", "#696969", "Gray/Black"),
("LightSlateGray", "#778899", "Gray/Black"),
("SlateGray", "#708090", "Gray/Black"),
("DarkSlateGray", "#2F4F4F", "Gray/Black"),
("Black", "#000000", "Gray/Black"),
], columns=["Name", "Color", "Group"])
source = ColumnDataSource(dict(
names = list(css3_colors.Name),
groups = list(css3_colors.Group),
colors = list(css3_colors.Color),
))
xdr = FactorRange(factors=list(css3_colors.Group.unique()))
ydr = FactorRange(factors=list(reversed(css3_colors.Name)))
plot = Plot(title="CSS3 Color Names", x_range=xdr, y_range=ydr, plot_width=600, plot_height=2000)
rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None)
plot.add_glyph(source, rect)
xaxis_above = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_above, 'above')
xaxis_below = CategoricalAxis(major_label_orientation=pi/4)
plot.add_layout(xaxis_below, 'below')
plot.add_layout(CategoricalAxis(), 'left')
doc = Document()
doc.add(plot)
if __name__ == "__main__":
filename = "colors.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "CSS3 Color Names"))
print("Wrote %s" % filename)
view(filename)
| bsd-3-clause |
linnarsson-lab/loompy | doc/conf.py | 1 | 9128 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# loompy documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 3 00:11:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_bootstrap_theme
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('_ext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'edit_on_github']
intersphinx_mapping = {
'urllib3': ('http://urllib3.readthedocs.org/en/latest', None),
'python': ('http://docs.python.org/3', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org', None),
'h5py': ('http://docs.h5py.org/en/latest/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'loompy'
copyright = '2017, LinnarssonLab'
author = 'Linnarsson lab members'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
__version__ = "0.0.0"
exec(open('../loompy/_version.py').read())
# The short X.Y version.
version = ".".join(__version__.split(".")[:-1])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
html_logo = "Loom_icon.png"
# Theme options are theme-specific and customize the look and feel of a
# theme further.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "Loom",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Contents",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
("loompy.org", "http://loompy.org", True),
("GitHub", "https://github.com/linnarsson-lab/loompy", True),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar navbar-light",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "cosmo" or "sandstone".
#
# The set of valid themes depend on the version of Bootstrap
# that's used (the next config option).
#
# Currently, the supported themes are:
# - Bootstrap 2: https://bootswatch.com/2
# - Bootstrap 3: https://bootswatch.com/3
'bootswatch_theme': "simplex",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': ['localtoc.html']
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'loompydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'loompy.tex', 'loompy Documentation',
'LinnarssonLab', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'loompy', 'loompy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'loompy', 'loompy Documentation',
author, 'loompy', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# --- Options for "Edit on GitHub" ---
edit_on_github_project = 'linnarsson-lab/loompy'
edit_on_github_branch = 'master/doc'
# --- Sort the autodocs by source (currently not working, not sure why)
autodoc_member_order = 'bysource'
html_scaled_image_link = False | bsd-2-clause |
camisatx/pySecMaster | pySecMaster/download.py | 1 | 67312 | from datetime import datetime, timedelta
from functools import wraps
import numpy as np
import pandas as pd
import time
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from utilities.date_conversions import date_to_iso
__author__ = 'Josh Schertz'
__copyright__ = 'Copyright (C) 2018 Josh Schertz'
__description__ = 'An automated system to store and maintain financial data.'
__email__ = 'josh[AT]joshschertz[DOT]com'
__license__ = 'GNU AGPLv3'
__maintainer__ = 'Josh Schertz'
__status__ = 'Development'
__url__ = 'https://joshschertz.com/'
__version__ = '1.5.0'
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
def rate_limit(rate=2000, period_sec=600, threads=1):
"""
A decorator that limits the rate at which a function is run. If the function
is run over that rate, a forced sleep will occur. The main purpose of this
is to make sure an API is not overloaded with requests. For Quandl, the
default API limit is 2,000 calls in a 10 minute time frame. If multiple
threads are using the API concurrently, make sure to increase the threads
variable to the number of threads being used.
:param rate: Integer of the number of items that are downloaded
:param period_sec: Integer of the period (seconds) that the rate occurs in
:param threads: Integer of the threads that will be running concurrently
"""
optimal_rate = float((rate / period_sec) / threads)
min_interval = 1.0 / optimal_rate
def rate_decorator(func):
last_check = [0.0]
@wraps(func)
def rate_limit_func(*args, **kargs):
elapsed = time.time() - last_check[0]
time_to_wait = min_interval - elapsed
if time_to_wait > 0:
time.sleep(time_to_wait)
# print('Sleeping for %0.2f seconds' % int(time_to_wait))
ret = func(*args, **kargs)
last_check[0] = time.time()
return ret
return rate_limit_func
return rate_decorator
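# Illustrative sketch of how the rate_limit decorator above might be applied (the
# function name and URL below are hypothetical placeholders, not part of this module):
#
#     @rate_limit(rate=2000, period_sec=600, threads=1)
#     def fetch_page(url):
#         return urlopen(url).read()
#
# With these values each call is spaced by at least
# 1 / ((2000 / 600) / 1) = 0.3 seconds, keeping a single thread under the
# 2,000 calls per 10 minutes limit described in the docstring.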
def csv_load_converter(value):
    """Convert a raw CSV field to a float, returning -1 for non-numeric values."""
    try:
        return float(value)
    except ValueError:
        return -1
class QuandlDownload(object):
def __init__(self, quandl_token, db_url):
"""Items that are always required when downloading Quandl data.
:param quandl_token: String of the sensitive Quandl API token
:param db_url: String of the database API url
"""
self.quandl_token = quandl_token
self.db_url = db_url
def download_quandl_codes(self, db_name, page_num, download_try=0):
"""The token, database name, database url and page number are provided,
and this downloads the metadata library for that particular page as a
csv file. Quandl has a restriction where only 300 items can be
downloaded at a time, thus multiple requests must be sent. This is
handled by the page number variable.
:param db_name: String of the name of the database being downloaded
:param page_num: Integer of the database's metadata page to download
:param download_try: Integer of the number of attempts to download data
:return: A DataFrame with the Quandl database metadata
"""
download_try += 1
col_names = ['q_code', 'name', 'start_date', 'end_date', 'frequency',
'last_updated']
file = self.download_data(db_name, page_num=page_num)
try:
df = pd.read_csv(file, index_col=False, names=col_names,
encoding='utf-8')
if len(df) == 0:
# When there are no more codes to download, the file object
                # will be an empty CSV, and in turn, an empty DF. Return an
                # empty DF, which will indicate there are no more pages to download.
return pd.DataFrame()
except TypeError:
# When there are no more codes to download, the file object will be
# an empty CSV. With pandas prior to 0.17, this will cause the
# read_csv function to fail on a TypeError since it's not able to
# add column names to an empty DF. Return an empty DF, which will
            # indicate there are no more pages to download.
return pd.DataFrame()
except Exception as e:
print(e)
if download_try <= 10:
print('Error: An unknown issue occurred when downloading the '
'Quandl codes CSV. Will download the CSV file again.')
df = self.download_quandl_codes(db_name, page_num, download_try)
return df # Stop the recursion
else:
raise OSError('Unknown error when downloading page %s of the '
'%s database. Quitting after 10 failed attempts.'
% (page_num, db_name))
df['start_date'] = df.apply(date_to_iso, axis=1, args=('start_date',))
df['end_date'] = df.apply(date_to_iso, axis=1, args=('end_date',))
df['last_updated'] = df.apply(date_to_iso, axis=1, args=('last_updated',))
df.insert(len(df.columns), 'page_num', page_num)
df.insert(len(df.columns), 'created_date', datetime.now().isoformat())
df.insert(len(df.columns), 'updated_date', datetime.now().isoformat())
return df
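    # Illustrative pagination sketch for download_quandl_codes (the token and url
    # components below are hypothetical, shown only to demonstrate the loop; an empty
    # DataFrame marks the last page, as noted in the method above):
    #
    #     qd = QuandlDownload('my_token', ('https://www.quandl.com/api/v2/datasets.csv'
    #                                      '?query=*&source_code=', '&per_page=300&page='))
    #     page_num = 1
    #     while True:
    #         codes_df = qd.download_quandl_codes('WIKI', page_num)
    #         if codes_df.empty:
    #             break
    #         page_num += 1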
def download_quandl_data(self, q_code, csv_out, beg_date=None,
verbose=True):
"""Receives a Quandl Code as a string, and it calls download_data to
actually download it. Once downloaded, this adds titles to the column
headers, depending on what type of Quandl Code it is. Last, a column
for the q_code is added to the DataFrame.
:param q_code: A string of the Quandl Code
:param csv_out: String of directory and CSV file name; used to store
the quandl codes that do not have any data
:param beg_date: String of the start date (YYYY-MM-DD) to download
:param verbose: Boolean
:return: A DataFrame with the data points for the Quandl Code
"""
# Download the data to a CSV file
if beg_date is not None:
file = self.download_data(q_code, beg_date=beg_date)
else:
file = self.download_data(q_code)
# Specify the column headers
if q_code[:4] == 'WIKI':
column_names = ['date', 'open', 'high', 'low', 'close', 'volume',
'dividend', 'split', 'adj_open',
'adj_high', 'adj_low', 'adj_close', 'adj_volume']
columns_to_remove = ['adj_open', 'adj_high', 'adj_low', 'adj_close',
'adj_volume']
elif q_code[:3] == 'EOD':
column_names = ['date', 'open', 'high', 'low', 'close', 'volume',
'dividend', 'split', 'adj_open', 'adj_high',
'adj_low', 'adj_close', 'adj_volume']
columns_to_remove = ['adj_open', 'adj_high', 'adj_low', 'adj_close',
'adj_volume']
elif q_code[:4] == 'GOOG':
column_names = ['date', 'open', 'high', 'low', 'close', 'volume']
columns_to_remove = []
elif q_code[:5] == 'YAHOO':
column_names = ['date', 'open', 'high', 'low', 'close',
'volume', 'adjusted_close']
columns_to_remove = ['adjusted_close']
else:
            print('The column headers for the %s download are not defined within '
'QuandlDownload.download_quandl_data in download.py' % q_code)
return pd.DataFrame()
if file:
try:
# Create a DataFrame from the file object
raw_df = pd.read_csv(file, index_col=False, names=column_names,
encoding='utf-8',
converters={'open': csv_load_converter,
'high': csv_load_converter,
'low': csv_load_converter,
'close': csv_load_converter,
'volume': csv_load_converter})
except IndexError:
return pd.DataFrame()
except OSError:
return pd.DataFrame()
except Exception as e:
print('Unknown error occurred when reading Quandl CSV for %s '
'in download_quandl_data in download.py' % q_code)
print(e)
return pd.DataFrame()
# Remove all adjusted columns
raw_df.drop(columns_to_remove, axis=1, inplace=True)
# Data successfully downloaded; check to see if code was on the list
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
if len(codes_wo_data_df.
loc[codes_wo_data_df['q_code'] == q_code]) > 0:
# This q_code now has data whereas it didn't on that last
# run. Remove the code from the DataFrame
wo_data_df = codes_wo_data_df[codes_wo_data_df.q_code !=
q_code]
# Remove any duplicates (keeping the latest) and save to CSV
clean_wo_data_df = \
wo_data_df.drop_duplicates(subset='q_code', keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s was removed from the wo_data CSV file since '
'data was available for download.' % (q_code,))
except ValueError:
# The CSV file wasn't able to be read, so skip it for now
pass
else:
# There is no data for this code, so add it to the CSV file
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
except ValueError:
# The CSV file wasn't able to be read, so skip it for now
return pd.DataFrame()
except Exception as e:
                # An unexpected error that occurs intermittently
                print('Unknown error occurred in download_quandl_data')
print(e)
return pd.DataFrame()
try:
# check the DataFrame for values
codes_wo_data_df.loc[codes_wo_data_df['q_code'] == q_code]
except KeyError:
# The CSV file wasn't able to be read, so skip it for now
return pd.DataFrame()
cur_date = datetime.now().isoformat()
if len(codes_wo_data_df.
loc[codes_wo_data_df['q_code'] == q_code]) > 0:
# The code already exists within the CSV, so update the date
codes_wo_data_df.set_value(codes_wo_data_df['q_code'] == q_code,
'date_tried', cur_date)
# Remove any duplicates (keeping the latest) and save to a CSV
clean_wo_data_df = codes_wo_data_df.\
drop_duplicates(subset='q_code', keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s still did not have data. Date tried was updated '
'in the wo_data CSV file.' % (q_code,))
else:
                # The code does not exist within the CSV, so create and append
# it to the CSV file. Do this via a DataFrame to CSV append
no_data_df = pd.DataFrame(data=[(q_code, cur_date)],
columns=['q_code', 'date_tried'])
with open(csv_out, 'a') as f:
no_data_df.to_csv(f, mode='a', header=False, index=False)
if verbose:
print('%s did not have data, thus it was added to the '
'wo_data CSV file.' % (q_code,))
# Return an empty DF; QuandlDataExtractor will be able to handle it
return pd.DataFrame()
if len(raw_df) in [0, 1]:
# The raw data has no values
return pd.DataFrame()
raw_df = raw_df[1:] # Removes the column headers from data download
raw_df['date'] = raw_df.apply(date_to_iso, axis=1, args=('date',))
raw_df.insert(len(raw_df.columns), 'updated_date',
datetime.now().isoformat())
# Check each price column for outliers
for column in raw_df.columns:
# Skip the date and updated_date columns
if column in ['date', 'updated_date']:
continue
# Convert each column's values to a number, forcing all non-numbers
# to be NaN values
raw_df[column] = pd.to_numeric(raw_df[column], errors='coerce')
# Fill all NaN values with -1 to indicate no data
raw_df.fillna(-1.0, inplace=True)
try:
# Remove all rows that have values larger than 3 deviations mean
# raw_df = (raw_df[(pd.DataFrame.abs(stats.zscore(raw_df)) < 3).
# all(axis=1)])
# raw_df = raw_df[pd.DataFrame.abs(raw_df-raw_df.mean()) <=
# (3*raw_df.std())]
if column in ['open', 'high', 'low', 'close']:
# Check column for values over 1M, create DF for outliers
outliers_df = raw_df[pd.DataFrame.abs(raw_df[column]) >
1000000]
if len(outliers_df):
print(outliers_df)
# If outlier, replace the value for the row with -1
for index, row in outliers_df.iterrows():
# Index from the outlier_df is the index from raw_df
raw_df.set_value(index, column, -1.0)
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=4)
# elif column in ['dividend']:
# # Round all data values to their appropriate levels
# raw_df[column] = np.round(raw_df[column], decimals=3)
elif column in ['volume']:
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=0)
except TypeError:
pass
return raw_df
def download_data(self, name, page_num=None, beg_date=None, download_try=0):
"""Downloads the CSV from the Quandl API URL provided.
:param name: String of the object being downloaded. It can either be
the database name or a Quandl Code
:param page_num: Integer used when downloading database Quandl Codes
:param beg_date: String of the start date (YYYY-MM-DD) to download
:param download_try: Optional integer that indicates a download
retry; utilized after an HTTP error to try the download again
recursively
:return: CSV file of the downloaded data
"""
db_url = self.db_url[0] + name + self.db_url[1]
download_try += 1
# Only Quandl Code downloads have page numbers
if page_num is not None:
# There is no need for the Quandl Code queries to have dates
url_var = str(page_num) + '&auth_token=' + self.quandl_token
else:
url_var = '?auth_token=' + self.quandl_token
if beg_date is not None:
# NOTE: This only works with the v1 API
url_var = url_var + '&trim_start=' + beg_date
try:
csv_file = urlopen(db_url + url_var)
return csv_file
except HTTPError as e:
if 'http error 400' in str(e).lower():
# HTTP Error 400: Bad Request
# Don't raise an exception; indicates a non existent code
print('HTTPError %s: %s does not exist.' % (e.reason, name))
elif 'http error 403' in str(e).lower():
# HTTP Error 403: Forbidden
raise OSError('HTTPError %s: Reached Quandl API call '
'limit. Make the RateLimit more restrictive.'
% e.reason)
elif 'http error 404' in str(e).lower():
# HTTP Error 404: Not Found
if page_num:
raise OSError('HTTPError %s: Quandl page %i for %s not '
'found.' % (e.reason, page_num, name))
# else:
# # Don't raise an exception; indicates the last page
# print('HTTPError %s: %s not found.' % (e.reason, name))
elif 'http error 429' in str(e).lower():
# HTTP Error 429: Too Many Requests
if download_try <= 5:
print('HTTPError %s: Exceeded Quandl API limit. Make the '
'rate_limit more restrictive. Program will sleep for '
'11 minutes and will try again...' % (e.reason,))
time.sleep(11 * 60)
                    return self.download_data(name, download_try=download_try)
else:
raise OSError('HTTPError %s: Exceeded Quandl API limit. '
                                  'After trying 5 times, the download was still '
'not successful. You could have hit the '
'50,000 calls per day limit.' % (e.reason,))
elif 'http error 500' in str(e).lower():
# HTTP Error 500: Internal Server Error
if download_try <= 10:
print('HTTPError %s: Internal Server Error' % (e.reason,))
elif 'http error 502' in str(e).lower():
# HTTP Error 502: Bad Gateway
if download_try <= 10:
print('HTTPError %s: Encountered a bad gateway with the '
'server. Maybe the network is down. Will sleep for '
'5 minutes' % (e.reason,))
time.sleep(5 * 60)
                    return self.download_data(name, download_try=download_try)
else:
raise OSError('HTTPError %s: Server is currently '
'unavailable. After trying 10 times, the '
'download was still not successful. '
'Quitting for now.' % (e.reason,))
elif 'http error 503' in str(e).lower():
# HTTP Error 503: Service Unavailable
if download_try <= 10:
print('HTTPError %s: Server is currently unavailable. '
'Maybe the network is down. Will sleep for 5 '
'minutes' % (e.reason,))
time.sleep(5 * 60)
                    return self.download_data(name, download_try=download_try)
else:
raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
'download was still not successful. '
'Quitting for now.' % (e.reason,))
elif 'http error 504' in str(e).lower():
# HTTP Error 504: GATEWAY_TIMEOUT
if download_try <= 10:
print('HTTPError %s: Server connection timed out. Maybe '
'the network is down. Will sleep for 5 minutes' %
(e.reason,))
time.sleep(5 * 60)
                    return self.download_data(name, download_try=download_try)
else:
raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
'download was still not successful. '
'Quitting for now.' % (e.reason,))
else:
print('Base URL used: %s' % (db_url + url_var,))
if page_num:
raise OSError('%s - Unknown error when downloading page '
'%i for %s' % (e, page_num, name))
else:
raise OSError('%s - Unknown error when downloading %s' %
(e, name))
except URLError as e:
if download_try <= 10:
print('Warning: Experienced URL Error %s. Program will sleep '
'for 5 minutes and will then try again...' % (e.reason,))
print('URL used: %s' % (db_url + url_var,))
time.sleep(5 * 60)
                return self.download_data(name, download_try=download_try)
else:
raise URLError('Warning: Still experiencing URL Error %s. '
'After trying 10 times, the error remains. '
'Quitting for now, but you can try again later.'
% (e.reason,))
except Exception as e:
print(e)
raise OSError('Warning: Encountered an unknown error when '
'downloading %s in download_csv in download.py' %
(name,))
def download_google_data(db_url, tsid, exchanges_df, csv_out, verbose=True):
""" Receives a tsid as a string, splits the code into ticker and
exchange, then passes it to the url to download the data. Once downloaded,
this adds titles to the column headers.
:param db_url: Dictionary of google finance url components
:param tsid: A string of the tsid
:param exchanges_df: DataFrame with all exchanges and their symbols
:param csv_out: String with the file directory for the CSV file that has
all the codes that don't have any data
:param verbose: Boolean of whether to print debugging statements
:return: A DataFrame with the data points for the tsid.
"""
ticker = tsid[:tsid.find('.')]
exchange_symbol = tsid[tsid.find('.')+1:tsid.find('.', tsid.find('.')+1)]
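    # For example, a tsid such as 'AAPL.Q.0' (hypothetical) splits into ticker 'AAPL'
    # and exchange_symbol 'Q'; the lookup below then maps that tsid exchange symbol to
    # the corresponding Google Finance exchange code from exchanges_df.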
try:
# Use the tsid exchange symbol to get the Google exchange symbol
exchange = (exchanges_df.loc[exchanges_df['tsid_symbol'] ==
exchange_symbol, 'goog_symbol'].values)
except KeyError:
exchange = None
# Make the url string; aside from the root, the items can be in any order
url_string = db_url['root'] # Establish the url root
for key, item in db_url.items():
if key == 'root':
continue # Already used above
elif key == 'ticker':
url_string += '&' + item + ticker
elif key == 'exchange':
if exchange:
url_string += '&' + item + exchange[0]
else:
url_string += '&' + item
def download_data(url, download_try=0):
""" Downloads the text data from the url provided.
:param url: String that contains the url of the data to download.
:param download_try: Integer of the number of attempts to download data.
:return: A list of bytes of the data downloaded.
"""
download_try += 1
try:
# Download the data
return urlopen(url).readlines()
except HTTPError as e:
if 'http error 403' in str(e).lower():
# HTTP Error 403: Forbidden
raise OSError('HTTPError %s: Reached API call limit. Make the '
'RateLimit more restrictive.' % (e.reason,))
elif 'http error 404' in str(e).lower():
# HTTP Error 404: Not Found
raise OSError('HTTPError %s: %s not found' % (e.reason, tsid))
elif 'http error 429' in str(e).lower():
# HTTP Error 429: Too Many Requests
if download_try <= 5:
print('HTTPError %s: Exceeded API limit. Make the '
'RateLimit more restrictive. Program will sleep for '
'11 minutes and will try again...' % (e.reason,))
time.sleep(11 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Exceeded API limit. After '
                                  'trying 5 times, the download was still not '
                                  'successful. You could have hit the per day '
                                  'call limit.' % (e.reason,))
            elif 'http error 500' in str(e).lower():
                # HTTP Error 500: Internal Server Error
                if download_try <= 10:
                    print('HTTPError %s: Internal Server Error. Will sleep '
                          'for 5 minutes and then try again...' % (e.reason,))
                    time.sleep(5 * 60)
                    return download_data(url, download_try)
elif 'http error 502' in str(e).lower():
# HTTP Error 502: Bad Gateway
if download_try <= 10:
print('HTTPError %s: Encountered a bad gateway with the '
'server. Maybe the network is down. Will sleep for '
'5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
else:
raise OSError('HTTPError %s: Server is currently '
'unavailable. After trying 10 times, the '
'download was still not successful. Quitting '
'for now.' % (e.reason,))
elif 'http error 503' in str(e).lower():
# HTTP Error 503: Service Unavailable
# Received this HTTP Error after 2000 queries. Browser showed
# captcha message upon loading url.
if download_try <= 10:
print('HTTPError %s: Server is currently unavailable. '
'Maybe the network is down or the server is blocking '
'you. Will sleep for 5 minutes...' % (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. '
                                  'Quitting for now.' % (e.reason,))
elif 'http error 504' in str(e).lower():
# HTTP Error 504: GATEWAY_TIMEOUT
if download_try <= 10:
print('HTTPError %s: Server connection timed out. Maybe '
'the network is down. Will sleep for 5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. Quitting '
                                  'for now.' % (e.reason,))
else:
print('Base URL used: %s' % (url,))
raise OSError('%s - Unknown error when downloading %s'
% (e, tsid))
except URLError as e:
if download_try <= 10:
print('Warning: Experienced URL Error %s. Program will '
'sleep for 5 minutes and will then try again...' %
(e.reason,))
time.sleep(5 * 60)
                return download_data(url, download_try)
else:
raise URLError('Warning: Still experiencing URL Error %s. '
'After trying 10 times, the error remains. '
'Quitting for now, but you can try again later.'
% (e.reason,))
except Exception as e:
print(e)
print('Warning: Encountered an unknown error when downloading %s '
'in download_data in download.py' % (tsid,))
def google_data_processing(url_obj):
""" Takes the url object returned from Google, and formats the text data
into a DataFrame that can be saved to the SQL Database. Saves each
processed line to a list as a tuple, with each element a piece of data.
The list is changed to a DataFrame before being returned.
:param url_obj: A text byte object that represents the downloaded data
:return: A DataFrame of the processed minute data.
"""
        # Find the interval, in seconds, at which the data was downloaded
if url_obj[3][:8].decode('utf-8') == 'INTERVAL':
interval = int(url_obj[3][9:].decode('utf-8'))
# Normal trading hours: data starts on line 7
data_start_line = 7
# Interval on the 4th line if receiving extended hours quotations
elif url_obj[4][:8].decode('utf-8') == 'INTERVAL':
interval = int(url_obj[4][9:].decode('utf-8'))
# Extended trading hours: data starts on line 8
data_start_line = 8
else:
interval = 60 # Assume default of 60 seconds
data_start_line = 7 # Assume data starts on line 7
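        # The loop below expects Google's intraday layout, roughly (values are
        # illustrative only):
        #   a1438709400,125.61,125.80,125.34,125.57,2163447  <- 'a' + unix time
        #   1,125.73,125.76,125.52,125.61,1172012            <- offset from it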
data = []
# From the text file downloaded, adding each line to a list as a tuple
for line_num in range(data_start_line, len(url_obj)):
line = url_obj[line_num].decode('utf-8')
if line.count(',') == 5:
date, close, high, low, open_, volume = line.split(',')
if str(date[0]) == 'a':
# The whole unix time
date_obj = datetime.utcfromtimestamp(int(date[1:]))
else:
# Get the prior line's unix time/period
prior_line = url_obj[line_num - 1].decode('utf-8')
if prior_line[0] == 'a':
# The prior line had the entire unix time
prior_unix_time = prior_line[1:prior_line.find(',')]
# Add the product of the current date period and the
# interval to the prior line's unix time
next_date = int(prior_unix_time) + (int(date)*interval)
date_obj = datetime.utcfromtimestamp(next_date)
else:
# The prior line is a date period, so find the delta
prior_unix_sec = prior_line[:prior_line.find(',')]
# Difference between the current and the prior unix sec
unix_sec_diff = int(date) - int(prior_unix_sec)
# Add the product of the time delta and the interval to
# the prior bar's datetime
date_obj = (data[-1][0] +
timedelta(seconds=unix_sec_diff*interval))
data.append(tuple((date_obj, float(close), float(high),
float(low), float(open_), int(volume))))
column_names = ['date', 'close', 'high', 'low', 'open', 'volume']
processed_df = pd.DataFrame(data, columns=column_names)
return processed_df
url_obj = download_data(url_string)
try:
raw_df = google_data_processing(url_obj)
except IndexError:
return pd.DataFrame()
except Exception as e:
print('Unknown error occurred when processing Google raw data for %s' %
tsid)
print(e)
return pd.DataFrame()
if len(raw_df.index) > 0:
# Data successfully downloaded; check to see if code was on the list
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
if len(codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid]) > 0:
# This tsid now has data whereas it didn't on that last run.
# Remove the code from the DataFrame
wo_data_df = codes_wo_data_df[codes_wo_data_df.tsid != tsid]
# Remove any duplicates (keeping the latest) and save to a CSV
clean_wo_data_df = wo_data_df.drop_duplicates(subset='tsid',
keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s was removed from the wo_data CSV file since data '
'was available for download.' % (tsid,))
except ValueError:
# The CSV file wasn't able to be read, so skip it for now
pass
else:
# There is no price data for this code; add to CSV file via DataFrame
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
cur_date = datetime.now().isoformat()
if len(codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid]) > 0:
# The code already exists within the CSV, so update the date
                codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid,
                                     'date_tried'] = cur_date
# Remove any duplicates (keeping the latest) and save to a CSV
clean_wo_data_df = \
codes_wo_data_df.drop_duplicates(subset='tsid', keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s still did not have data. Date tried was updated '
'in the wo_data CSV file.' % (tsid,))
else:
                # The code does not exist within the CSV, so create and append
                # it to the CSV file. Do this via a DataFrame to CSV append
no_data_df = pd.DataFrame(data=[(tsid, cur_date)],
columns=['tsid', 'date_tried'])
with open(csv_out, 'a') as f:
no_data_df.to_csv(f, mode='a', header=False, index=False)
if verbose:
print('%s did not have data, thus it was added to the '
'wo_data CSV file.' % (tsid,))
except Exception as e:
print('Error occurred when trying to update %s CSV data for %s' %
(csv_out, tsid))
print(e)
# Return an empty DF; DataExtraction class will be able to handle it
return pd.DataFrame()
if db_url['interval'] == 'i=' + str(60*60*24):
# Processing daily data, thus remove the time stamp from the date
raw_df['date'] = raw_df['date'].apply(lambda x: x.date().isoformat())
else:
raw_df['date'] = raw_df['date'].apply(lambda x: x.isoformat())
raw_df.insert(len(raw_df.columns), 'updated_date',
datetime.now().isoformat())
# Check each price column for outliers
for column in raw_df.columns:
# Skip the date and updated_date columns
if column in ['date', 'updated_date']:
continue
# Convert each column's values to a number, forcing all non-numbers
# to be NaN values
raw_df[column] = pd.to_numeric(raw_df[column], errors='coerce')
# Fill all NaN values with -1 to indicate no data
raw_df.fillna(-1.0, inplace=True)
try:
            # Remove all rows with values more than 3 standard deviations
            # from the mean
# raw_df = (raw_df[(pd.DataFrame.abs(stats.zscore(raw_df)) < 3).
# all(axis=1)])
# raw_df = raw_df[pd.DataFrame.abs(raw_df-raw_df.mean()) <=
# (3*raw_df.std())]
if column in ['open', 'high', 'low', 'close']:
# Check column for values over 1M, creating DF for all outliers
outliers_df = raw_df[pd.DataFrame.abs(raw_df[column]) > 1000000]
if len(outliers_df):
print(outliers_df)
                    # If there is an outlier, replace the row's value with -1
for index, row in outliers_df.iterrows():
# Index from the outlier_df is the index from the raw_df
raw_df.set_value(index, column, -1.0)
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=4)
elif column in ['volume']:
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=0)
except TypeError:
pass
return raw_df
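# A minimal usage sketch for download_google_data. The db_url keys and values
# below are assumptions for illustration only; the real dictionary is built by
# the calling code:
#
#   goog_url = {'root': 'http://www.google.com/finance/getprices?',
#               'ticker': 'q=', 'exchange': 'x=', 'interval': 'i=60',
#               'period': 'p=20d', 'fields': 'f=d,c,v,o,h,l'}
#   minute_df = download_google_data(goog_url, 'AAPL.Q.0', exchanges_df,
#                                    csv_out='goog_min_wo_data.csv')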
def download_yahoo_data(db_url, tsid, exchanges_df, csv_out, verbose=True):
""" Receives a tsid as a string, splits the code into ticker and
exchange, then passes it to the url to download the data. Once downloaded,
this adds titles to the column headers.
:param db_url: Dictionary of yahoo finance url components
:param tsid: A string of the tsid
:param exchanges_df: DataFrame with all exchanges and their symbols
:param csv_out: String with the file directory for the CSV file that has
all the codes that don't have any data
:param verbose: Boolean of whether to print debugging statements
:return: A DataFrame with the data points for the tsid.
"""
ticker = tsid[:tsid.find('.')]
exchange_symbol = tsid[tsid.find('.')+1:tsid.find('.', tsid.find('.')+1)]
try:
# Use the tsid exchange symbol to get the Yahoo exchange symbol
exchange = (exchanges_df.loc[exchanges_df['tsid_symbol'] ==
exchange_symbol, 'yahoo_symbol'].values)
except KeyError:
exchange = None
# Make the url string; aside from the root, the items can be in any order
url_string = db_url['root'] # Establish the url root
for key, item in db_url.items():
if key == 'root':
continue # Already used above
elif key == 'ticker':
if exchange:
# If an exchange was found, Yahoo requires both ticker and
# exchange
url_string += '&' + item + ticker + '.' + exchange
else:
# Ticker is in a major exchange and doesn't need exchange info
url_string += '&' + item + ticker
else:
url_string += '&' + item
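    # For a ticker on a non-major exchange the Yahoo symbol gets a suffix,
    # e.g. (hypothetical) 'BHP.AX'; otherwise the bare ticker is used. Any
    # remaining db_url items (dates, interval, format) are appended unchanged.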
def download_data(url, download_try=0):
""" Downloads the CSV file from the url provided.
:param url: String that contains the url of the data to download.
:param download_try: Integer of the number of attempts to download data.
:return: A list of bytes of the data downloaded.
"""
download_try += 1
try:
# Download the csv file
return urlopen(url)
except HTTPError as e:
if 'http error 403' in str(e).lower():
# HTTP Error 403: Forbidden
raise OSError('HTTPError %s: Reached API call limit. Make the '
'RateLimit more restrictive.' % (e.reason,))
elif 'http error 404' in str(e).lower():
# HTTP Error 404: Not Found
# if verbose:
# print('HTTPError %s: %s not found' % (e.reason, tsid))
return None
elif 'http error 429' in str(e).lower():
# HTTP Error 429: Too Many Requests
if download_try <= 5:
print('HTTPError %s: Exceeded API limit. Make the '
'RateLimit more restrictive. Program will sleep for '
'11 minutes and will try again...' % (e.reason,))
time.sleep(11 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Exceeded API limit. After '
                                  'trying 5 times, the download was still not '
                                  'successful. You could have hit the per day '
                                  'call limit.' % (e.reason,))
            elif 'http error 500' in str(e).lower():
                # HTTP Error 500: Internal Server Error
                if download_try <= 10:
                    print('HTTPError %s: Internal Server Error. Will sleep '
                          'for 5 minutes and then try again...' % (e.reason,))
                    time.sleep(5 * 60)
                    return download_data(url, download_try)
elif 'http error 502' in str(e).lower():
# HTTP Error 502: Bad Gateway
if download_try <= 10:
print('HTTPError %s: Encountered a bad gateway with the '
'server. Maybe the network is down. Will sleep for '
'5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
else:
raise OSError('HTTPError %s: Server is currently '
'unavailable. After trying 10 times, the '
'download was still not successful. Quitting '
'for now.' % (e.reason,))
elif 'http error 503' in str(e).lower():
# HTTP Error 503: Service Unavailable
# Received this HTTP Error after 2000 queries. Browser showed
# captcha message upon loading url.
if download_try <= 10:
print('HTTPError %s: Server is currently unavailable. '
'Maybe the network is down or the server is blocking '
'you. Will sleep for 5 minutes...' % (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. '
                                  'Quitting for now.' % (e.reason,))
elif 'http error 504' in str(e).lower():
# HTTP Error 504: GATEWAY_TIMEOUT
if download_try <= 10:
print('HTTPError %s: Server connection timed out. Maybe '
'the network is down. Will sleep for 5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. Quitting '
                                  'for now.' % (e.reason,))
else:
print('Base URL used: %s' % (url,))
raise OSError('%s - Unknown error when downloading %s' %
(e, tsid))
except URLError as e:
if download_try <= 10:
print('Warning: Experienced URL Error %s. Program will '
'sleep for 5 minutes and will then try again...' %
(e.reason,))
time.sleep(5 * 60)
                return download_data(url, download_try)
else:
raise URLError('Warning: Still experiencing URL Error %s. '
'After trying 10 times, the error remains. '
'Quitting for now, but you can try again later.'
% (e.reason,))
except Exception as e:
print(e)
print('Warning: Encountered an unknown error when downloading %s '
'in download_yahoo_data.download_data' % (tsid,))
url_obj = download_data(url_string)
column_names = ['date', 'open', 'high', 'low', 'close', 'volume',
'adj_close']
try:
raw_df = pd.read_csv(url_obj, index_col=False, names=column_names,
encoding='utf-8',
converters={'open': csv_load_converter,
'high': csv_load_converter,
'low': csv_load_converter,
'close': csv_load_converter,
'volume': csv_load_converter})
except IndexError:
return pd.DataFrame()
except OSError:
# Occurs when the url_obj is None, meaning the url returned a 404 error
return pd.DataFrame()
except Exception as e:
print('Unknown error occurred when reading Yahoo CSV for %s' % tsid)
print(e)
return pd.DataFrame()
if len(raw_df.index) > 0:
# Data successfully downloaded; check to see if code was on the list
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
if len(codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid]) > 0:
# This tsid now has data whereas it didn't on that last run.
# Remove the code from the DataFrame
wo_data_df = codes_wo_data_df[codes_wo_data_df.tsid != tsid]
# Remove any duplicates (keeping the latest) and save to a CSV
clean_wo_data_df = wo_data_df.drop_duplicates(subset='tsid',
keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s was removed from the wo_data CSV file since data '
'was available for download.' % (tsid,))
except ValueError:
# The CSV file wasn't able to be read, so skip it for now
pass
else:
# There is no price data for this code; add to CSV file via DataFrame
try:
codes_wo_data_df = pd.read_csv(csv_out, index_col=False)
cur_date = datetime.now().isoformat()
if len(codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid]) > 0:
# The code already exists within the CSV, so update the date
                codes_wo_data_df.loc[codes_wo_data_df['tsid'] == tsid,
                                     'date_tried'] = cur_date
# Remove any duplicates (keeping the latest) and save to a CSV
clean_wo_data_df = \
codes_wo_data_df.drop_duplicates(subset='tsid', keep='last')
clean_wo_data_df.to_csv(csv_out, index=False)
if verbose:
print('%s still did not have data. Date tried was updated '
'in the wo_data CSV file.' % (tsid,))
else:
                # The code does not exist within the CSV, so create and append
                # it to the CSV file. Do this via a DataFrame to CSV append
no_data_df = pd.DataFrame(data=[(tsid, cur_date)],
columns=['tsid', 'date_tried'])
with open(csv_out, 'a') as f:
no_data_df.to_csv(f, mode='a', header=False, index=False)
if verbose:
print('%s did not have data, thus it was added to the '
'wo_data CSV file.' % (tsid,))
except Exception as e:
print('Error occurred when trying to update %s CSV data for %s' %
(csv_out, tsid))
print(e)
# Return an empty DF; DataExtraction class will be able to handle it
return pd.DataFrame()
# Removes the column headers from data download
raw_df = raw_df[1:]
raw_df['date'] = raw_df.apply(date_to_iso, axis=1, args=('date',))
raw_df.insert(len(raw_df.columns), 'updated_date',
datetime.now().isoformat())
# Remove the adjusted close column since this is calculated manually
raw_df.drop('adj_close', axis=1, inplace=True)
# Check each price column for outliers
for column in raw_df.columns:
# Skip the date and updated_date columns
if column in ['date', 'updated_date']:
continue
# Convert each column's values to a number, forcing all non-numbers
# to be NaN values
raw_df[column] = pd.to_numeric(raw_df[column], errors='coerce')
# Fill all NaN values with -1 to indicate no data
raw_df.fillna(-1.0, inplace=True)
try:
            # Remove all rows with values more than 3 standard deviations
            # from the mean
# raw_df = (raw_df[(pd.DataFrame.abs(stats.zscore(raw_df)) < 3).
# all(axis=1)])
# raw_df = raw_df[pd.DataFrame.abs(raw_df-raw_df.mean()) <=
# (3*raw_df.std())]
if column in ['open', 'high', 'low', 'close']:
# Check column for values over 1M, creating DF for all outliers
outliers_df = raw_df[pd.DataFrame.abs(raw_df[column]) > 1000000]
if len(outliers_df):
print(outliers_df)
                    # If there is an outlier, replace the row's value with -1
for index, row in outliers_df.iterrows():
# Index from the outlier_df is the index from the raw_df
raw_df.set_value(index, column, -1.0)
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=4)
elif column in ['volume']:
# Round all data values to their appropriate levels
raw_df[column] = np.round(raw_df[column], decimals=0)
except TypeError:
pass
return raw_df
def download_csidata_factsheet(db_url, data_type, exchange_id=None,
data_format='csv'):
""" Downloads the CSV factsheet for the provided data_type (stocks,
commodities, currencies, etc.). A DataFrame is returned.
http://www.csidata.com/factsheets.php?type=stock&format=csv
:param db_url: String of the url root for the CSI Data website
:param data_type: String of the data to download
:param exchange_id: None or integer of the specific exchange to download
    :param data_format: String of the type of file that should be returned.
        Defaults to CSV
    :return: A DataFrame of the CSI Data factsheet
"""
url_string = db_url + 'type=' + data_type + '&format=' + data_format
if exchange_id:
url_string += '&exchangeid=' + exchange_id
download_try = 0
def download_data(url, download_try):
""" Downloads the data from the url provided.
:param url: String that contains the url of the data to download.
:param download_try: Integer of the number of attempts to download data.
:return: A CSV file as a url object
"""
download_try += 1
try:
# Download the data
return urlopen(url)
except HTTPError as e:
if 'http error 403' in str(e).lower():
# HTTP Error 403: Forbidden
raise OSError('HTTPError %s: Reached API call limit. Make the '
'RateLimit more restrictive.' % (e.reason,))
elif 'http error 404' in str(e).lower():
# HTTP Error 404: Not Found
raise OSError('HTTPError %s: %s not found' %
(e.reason, data_type))
elif 'http error 429' in str(e).lower():
# HTTP Error 429: Too Many Requests
if download_try <= 5:
print('HTTPError %s: Exceeded API limit. Make the '
'RateLimit more restrictive. Program will sleep for '
'11 minutes and will try again...' % (e.reason,))
time.sleep(11 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Exceeded API limit. After '
                                  'trying 5 times, the download was still not '
                                  'successful. You could have hit the per day '
                                  'call limit.' % (e.reason,))
            elif 'http error 500' in str(e).lower():
                # HTTP Error 500: Internal Server Error
                if download_try <= 10:
                    print('HTTPError %s: Internal Server Error. Will sleep '
                          'for 5 minutes and then try again...' % (e.reason,))
                    time.sleep(5 * 60)
                    return download_data(url, download_try)
elif 'http error 502' in str(e).lower():
# HTTP Error 502: Bad Gateway
if download_try <= 10:
print('HTTPError %s: Encountered a bad gateway with the '
'server. Maybe the network is down. Will sleep for '
'5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
else:
raise OSError('HTTPError %s: Server is currently '
'unavailable. After trying 10 times, the '
'download was still not successful. Quitting '
'for now.' % (e.reason,))
elif 'http error 503' in str(e).lower():
# HTTP Error 503: Service Unavailable
# Received this HTTP Error after 2000 queries. Browser showed
                # captcha message upon loading url.
if download_try <= 10:
print('HTTPError %s: Server is currently unavailable. '
'Maybe the network is down or the server is blocking '
'you. Will sleep for 5 minutes...' % (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. '
                                  'Quitting for now.' % (e.reason,))
elif 'http error 504' in str(e).lower():
# HTTP Error 504: GATEWAY_TIMEOUT
if download_try <= 10:
print('HTTPError %s: Server connection timed out. Maybe '
'the network is down. Will sleep for 5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. Quitting '
                                  'for now.' % (e.reason,))
else:
print('Base URL used: %s' % (url,))
raise OSError('%s - Unknown error when downloading %s'
% (e, data_type))
except URLError as e:
if download_try <= 10:
print('Warning: Experienced URL Error %s. Program will '
'sleep for 5 minutes and will then try again...' %
(e.reason,))
time.sleep(5 * 60)
                return download_data(url, download_try)
else:
raise URLError('Warning: Still experiencing URL Error %s. '
'After trying 10 times, the error remains. '
'Quitting for now, but you can try again later.'
% (e.reason,))
except Exception as e:
print(e)
raise OSError('Warning: Encountered an unknown error when '
'downloading %s in download_data in download.py' %
(data_type,))
def datetime_to_iso(row, column):
"""
        Convert the default "YYYY-MM-DD" date string into a full ISO 8601
        datetime string
"""
raw_date = row[column]
try:
raw_date_obj = datetime.strptime(raw_date, '%Y-%m-%d').isoformat()
except TypeError: # Occurs if there is no date provided ("nan")
raw_date_obj = None
return raw_date_obj
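    # e.g. a row where row['start_date'] == '1990-03-26' comes back as
    # '1990-03-26T00:00:00'; a missing date ("nan") returns None.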
csv_file = download_data(url_string, download_try)
try:
df = pd.read_csv(csv_file, encoding='latin_1', low_memory=False)
# Rename column headers to a standardized format
df.rename(columns={'CsiNumber': 'csi_number', 'Symbol': 'symbol',
'Name': 'name', 'Exchange': 'exchange',
'IsActive': 'is_active', 'StartDate': 'start_date',
'EndDate': 'end_date',
'ConversionFactor': 'conversion_factor',
'SwitchCfDate': 'switch_cf_date',
'PreSwitchCf': 'pre_switch_cf',
'SubExchange': 'sub_exchange'},
inplace=True)
# Rearrange the columns so sub_exchange is right after exchange
df = df[['csi_number', 'symbol', 'name', 'exchange', 'sub_exchange',
'is_active', 'start_date', 'end_date', 'conversion_factor',
'switch_cf_date', 'pre_switch_cf']]
if data_type == 'stock':
df['start_date'] = df.apply(datetime_to_iso, axis=1,
args=('start_date',))
df['end_date'] = df.apply(datetime_to_iso, axis=1,
args=('end_date',))
df['switch_cf_date'] = df.apply(datetime_to_iso, axis=1,
args=('switch_cf_date',))
except Exception as e:
print('Error occurred when processing CSI %s data in '
'download_csidata_factsheet' % data_type)
print(e)
return pd.DataFrame()
df.insert(len(df.columns), 'created_date', datetime.now().isoformat())
df.insert(len(df.columns), 'updated_date', datetime.now().isoformat())
return df
def download_nasdaq_industry_sector(db_url, exchange_list):
""" Download the CSV file from nasdaq.com that includes all company sector
and industry values for the specified exchange. Only NASDAQ, NYSE and AMEX
exchanges are available from NASDAQ's website.
http://www.nasdaq.com/screening/companies-by-industry.aspx?exchange=NASDAQ
&render=download
:param db_url: String of the url root
:param exchange_list: List of the exchanges to download; valid exchanges
include NASDAQ, NYSE and AMEX
:return: DataFrame of the industry and sector values for each tsid
"""
def download_data(url, download_try=0):
""" Downloads the data from the url provided.
:param url: String that contains the url of the data to download.
:param download_try: Integer of the number of attempts to download data.
:return: A CSV file as a url object
"""
download_try += 1
try:
# Download the data
return urlopen(url)
except HTTPError as e:
if 'http error 403' in str(e).lower():
# HTTP Error 403: Forbidden
raise OSError('HTTPError %s: Reached API call limit. Make the '
'RateLimit more restrictive.' % (e.reason,))
elif 'http error 404' in str(e).lower():
# HTTP Error 404: Not Found
raise OSError('HTTPError %s: Not found' % (e.reason,))
elif 'http error 429' in str(e).lower():
# HTTP Error 429: Too Many Requests
if download_try <= 5:
print('HTTPError %s: Exceeded API limit. Make the '
'RateLimit more restrictive. Program will sleep for '
'11 minutes and will try again...' % (e.reason,))
time.sleep(11 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Exceeded API limit. After '
                                  'trying 5 times, the download was still not '
                                  'successful. You could have hit the per day '
                                  'call limit.' % (e.reason,))
            elif 'http error 500' in str(e).lower():
                # HTTP Error 500: Internal Server Error
                if download_try <= 10:
                    print('HTTPError %s: Internal Server Error. Will sleep '
                          'for 5 minutes and then try again...' % (e.reason,))
                    time.sleep(5 * 60)
                    return download_data(url, download_try)
elif 'http error 502' in str(e).lower():
# HTTP Error 502: Bad Gateway
if download_try <= 10:
print('HTTPError %s: Encountered a bad gateway with the '
'server. Maybe the network is down. Will sleep for '
'5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
else:
raise OSError('HTTPError %s: Server is currently '
'unavailable. After trying 10 times, the '
'download was still not successful. Quitting '
'for now.' % (e.reason,))
elif 'http error 503' in str(e).lower():
# HTTP Error 503: Service Unavailable
# Received this HTTP Error after 2000 queries. Browser showed
                # captcha message upon loading url.
if download_try <= 10:
print('HTTPError %s: Server is currently unavailable. '
'Maybe the network is down or the server is blocking '
'you. Will sleep for 5 minutes...' % (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. '
                                  'Quitting for now.' % (e.reason,))
elif 'http error 504' in str(e).lower():
# HTTP Error 504: GATEWAY_TIMEOUT
if download_try <= 10:
print('HTTPError %s: Server connection timed out. Maybe '
'the network is down. Will sleep for 5 minutes'
% (e.reason,))
time.sleep(5 * 60)
                    return download_data(url, download_try)
                else:
                    raise OSError('HTTPError %s: Server is currently '
                                  'unavailable. After trying 10 times, the '
                                  'download was still not successful. Quitting '
                                  'for now.' % (e.reason,))
else:
print('Base URL used: %s' % url)
raise OSError('%s - Unknown error when downloading data' % e)
except URLError as e:
if download_try <= 10:
print('Warning: Experienced URL Error %s. Program will '
'sleep for 5 minutes and will then try again...' %
(e.reason,))
time.sleep(5 * 60)
                return download_data(url, download_try)
else:
raise URLError('Warning: Still experiencing URL Error %s. '
'After trying 10 times, the error remains. '
'Quitting for now, but you can try again later.'
% (e.reason,))
except Exception as e:
print(e)
raise OSError('Warning: Encountered an unknown error when '
'downloading data in download_data in download.py')
exchanges_df = pd.DataFrame(columns=['symbol', 'exchange', 'sector',
'industry'])
for exchange in exchange_list:
url_string = db_url + 'exchange=' + exchange + '&render=download'
csv_file = download_data(url=url_string)
try:
# df = pd.read_csv(csv_file, encoding='utf-8', low_memory=False)
df = pd.read_csv(csv_file, encoding='utf-8')
# Only keep the symbol, sector and industry columns
df = df[['Symbol', 'Sector', 'Industry']]
# Rename column headers to a standardized format
df.rename(columns={'Symbol': 'symbol', 'Sector': 'sector',
'Industry': 'industry'}, inplace=True)
# Replace n/a values with Numpy NaN
df.replace(to_replace='n/a', value=np.nan, inplace=True)
# Change any numpy nan values to None
df = df.where((pd.notnull(df)), None)
# Drop all rows where the sector and industry are None
df.dropna(how='all', subset=['sector', 'industry'], inplace=True)
# Add the exchange to the second column; it'll be used to convert
# the symbol to a tsid
df.insert(1, 'exchange', exchange)
exchanges_df = exchanges_df.append(df, ignore_index=True)
except Exception as e:
            print('Error occurred when processing the %s exchange sector and '
                  'industry data in download_nasdaq_industry_sector' % exchange)
print(e)
return exchanges_df
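# A short usage sketch (the screener URL is the one referenced in the
# docstring above and may change on NASDAQ's side):
#
#   nasdaq_url = 'http://www.nasdaq.com/screening/companies-by-industry.aspx?'
#   sector_df = download_nasdaq_industry_sector(nasdaq_url,
#                                               ['NASDAQ', 'NYSE', 'AMEX'])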
if __name__ == '__main__':
output_dir = 'C:/Users/Josh/Desktop/'
url_root = 'http://www.csidata.com/factsheets.php?'
csi_data_type = 'commodity' # commodity, stock
csi_exchange_id = '113' # 113, 89
df1 = download_csidata_factsheet(url_root, csi_data_type, csi_exchange_id)
print(df1.head(10))
| agpl-3.0 |
trafferty/utils | python/parseSSLog_allCalls.py | 1 | 11541 | #!/usr/bin/env python
import sys
import time
import re
import argparse
import json
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import datetime as dt
import collections
# local:
from colors import *
#PrintColors = (BRIGHT_WHITE, WHITE)
PrintColors = (WHITE, WHITE)
def parseSSLog(ss_log, output_path, pdf_path, generic=False):
'''
import re
p = re.compile(ur'../../15\ (?P<start_ts>[0-9:.]*): \(XaarCmdAPI \) \[DEBUG\] Calling (?P<func_name>[a-zA-Z]*).*?\n../../15\ (?P<end_ts>[0-9:.]*): \(XaarCmdAPI \) \[DEBUG\] [Call(s)]* success!', re.DOTALL)
test_str = u"09/29/15 20:28:13.769: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.770: (CommandProcessor) [DEBUG] Reply ID: 23 msg size: 68\n09/29/15 20:28:13.771: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetFPGAVersion...\n09/29/15 20:28:13.773: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.775: (CommandProcessor) [DEBUG] Reply ID: 24 msg size: 98\n09/29/15 20:28:13.776: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionSetupEncoderDirectionSEPD: Card=1, SEPD=2, Value=0\n09/29/15 20:28:13.777: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.778: (CommandProcessor) [DEBUG] Reply ID: 25 msg size: 81\n09/29/15 20:28:13.779: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionSetEncoderDivide...\n09/29/15 20:28:13.783: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.784: (CommandProcessor) [DEBUG] Reply ID: 26 msg size: 97\n09/29/15 20:28:13.785: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionEncoderPulseMultiplySEPD: Card=1, SEPD=2, Value=9\n09/29/15 20:28:13.789: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.790: (CommandProcessor) [DEBUG] Reply ID: 27 msg size: 92\n09/29/15 20:28:13.791: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionPDNoReverseSEPD: Card=1, SEPD=2, Value=0\n09/29/15 20:28:13.792: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.793: (CommandProcessor) [DEBUG] Reply ID: 28 msg size: 45\n09/29/15 20:28:13.794: (XaarCmdAPI ) [DEBUG] Calling XaarScorpionGetXUSBCount...\n09/29/15 20:28:13.795: (CommandProcessor) [DEBUG] Reply ID: 29 msg size: 98\n09/29/15 20:28:13.796: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionHeadPowerControl...\n09/29/15 20:28:13.809: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.810: (CommandProcessor) [DEBUG] Reply ID: 30 msg size: 62\n09/29/15 20:28:13.811: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetUsbOk...\n09/29/15 20:28:13.812: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.813: (CommandProcessor) [DEBUG] Reply ID: 31 msg size: 98\n09/29/15 20:28:13.814: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionHeadPowerControl...\n09/29/15 20:28:13.828: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.829: (CommandProcessor) [DEBUG] Reply ID: 32 msg size: 62\n09/29/15 20:28:13.830: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetUsbOk...\n09/29/15 20:28:13.831: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.833: (CommandProcessor) [DEBUG] Reply ID: 33 msg size: 133\n09/29/15 20:28:13.834: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetHeadType. Cards: 12, Heads: 8\n09/29/15 20:28:13.834: (XaarCmdAPI ) [DEBUG] Call(s) success!\n09/29/15 20:28:13.838: (CommandProcessor) [DEBUG] Reply ID: 34 msg size: 74\n09/29/15 20:28:13.839: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetHeadType...\n09/29/15 20:28:13.840: (CommandProcessor) [DEBUG] Reply ID: 35 msg size: 76\n09/29/15 20:28:13.841: (XaarCmdAPI ) [DEBUG] Calling bXaarScorpionGetHeadSerial...\n09/29/15 20:28:13.857: (XaarCmdAPI ) [DEBUG] Call success!\n09/29/15 20:28:13.859: (CommandProcessor) [DEBUG] Reply ID: 36 msg size: 164\n"
re.findall(p, test_str)
'''
SSCalls_pattern=ur'../../15\ (?P<start_ts>[0-9:.]*): \(XaarCmdAPI \) \[DEBUG\] Calling (?P<func_name>[a-zA-Z]*).*?\n../../15\ (?P<end_ts>[0-9:.]*): \(XaarCmdAPI \) \[DEBUG\] [Call(s)]* success'
f = open(ss_log, 'r')
buf = f.read()
f.close()
print "File (%s) opened and read into buffer, length of buf: %d" % (ss_log, len(buf))
SSCalls_sets = [x.groupdict() for x in re.finditer(SSCalls_pattern, buf, re.DOTALL)]
print "Parsing log for SSCalls calls...found %d records." % (len(SSCalls_sets))
if len(SSCalls_sets) > 0: print " >> Date range: %s - %s" % (SSCalls_sets[0]['start_ts'], SSCalls_sets[-1]['start_ts'])
timestamp_format = "%H:%M:%S.%f"
processing_times_SSCalls = []
for SSCalls_set in SSCalls_sets:
'''
[{u'end_ts': u'20:28:13.773',u'func_name': u'bXaarScorpionGetFPGAVersion',u'start_ts': u'20:28:13.771'},
{u'end_ts': u'20:28:13.777',u'func_name': u'bXaarScorpionSetupEncoderDirectionSEPD',u'start_ts': u'20:28:13.776'},
...
{u'end_ts': u'20:28:13.857',u'func_name': u'bXaarScorpionGetHeadType', u'start_ts': u'20:28:13.839'}]
'''
start_ts = dt.datetime.strptime(SSCalls_set['start_ts'], timestamp_format)
func_name= SSCalls_set['func_name']
end_ts = dt.datetime.strptime(SSCalls_set['end_ts'], timestamp_format)
time_delta = end_ts-start_ts
delta_ms = float(time_delta.total_seconds() * 1000.0)
if delta_ms <= 0:
delta_ms = 0.5
processing_times_SSCalls.append( (func_name, delta_ms) )
if len(processing_times_SSCalls) == 0:
return
func_deltas = {}
for processing_time in processing_times_SSCalls:
func_name = processing_time[0]
delta = processing_time[1]
if func_name not in func_deltas:
func_deltas[func_name] = []
func_deltas[func_name].append(delta)
func_deltas_ordered = collections.OrderedDict(sorted(func_deltas.items()))
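    # func_deltas_ordered now maps each API call name to its call durations in
    # ms, e.g. (values illustrative only):
    #   {'bXaarScorpionGetFPGAVersion': [2.0, 1.5, ...],
    #    'bXaarScorpionGetUsbOk': [1.0, 1.2, ...]}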
func_metrics = {}
for k in func_deltas_ordered.keys():
deltas_np = np.array(func_deltas_ordered[k])
func_metrics[k] = []
func_metrics[k].append( len(deltas_np) )
func_metrics[k].append( deltas_np.max() )
func_metrics[k].append( deltas_np.min() )
func_metrics[k].append( deltas_np.mean() )
func_metrics[k].append( deltas_np.std() )
func_metrics_ordered = collections.OrderedDict(sorted(func_metrics.items()))
print "%s---------------------------------------------------------------------------------------------------------------%s" % (BRIGHT_WHITE, RESET)
print "%s Scorpion Sample Max Min Mean Stdev %s" % (BRIGHT_WHITE, RESET)
print "%s Func name Count (ms) (ms) (ms) (ms) %s" % (BRIGHT_WHITE, RESET)
print "%s---------------------------------------------------------------------------------------------------------------%s" % (BRIGHT_WHITE, RESET)
idx = 0
for k in func_metrics_ordered.keys():
print("%s%-57s %9d %7.1f %7.1f %7.1f %7.1f %s" %
(PrintColors[idx%2], k, func_metrics_ordered[k][0], func_metrics_ordered[k][1], func_metrics_ordered[k][2],
func_metrics_ordered[k][3], func_metrics_ordered[k][4], RESET))
idx+=1
print("\nNow creating histograms for each function, see %s" % pdf_path)
with PdfPages(pdf_path) as pdf:
d = pdf.infodict()
d['Title'] = u'ScorpionDLL v6.2 FunctionTiming'
d['Author'] = u'Tom H. Rafferty / CNT'
d['Subject'] = u'ScorpionDLL v6.2 FunctionTiming'
d['CreationDate'] = dt.datetime.today()
d['ModDate'] = dt.datetime.today()
bins = 100
for k in func_deltas_ordered.keys():
deltas_np = np.array(func_deltas_ordered[k])
max = deltas_np.max()
min = deltas_np.min()
if max > bins:
binL = min
binH = max
else:
binL = 0
binH = bins
#fig = plt.figure(figsize=(10*2,5))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title('Histogram: %s' % k)
ax.set_ylabel('Bin Count')
ax.set_xlabel('Call Time (ms)')
ax.text(0.75, 0.75, "Call Stats:",
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.text(0.75, 0.70, ("n: %d" % (len(deltas_np))),
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.text(0.75, 0.65, ("min: %.2f" % (deltas_np.min())),
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.text(0.75, 0.60, ("max: %.2f" % (deltas_np.max())),
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.text(0.75, 0.55, ("mean: %.2f" % (np.round(deltas_np.mean()))),
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.text(0.75, 0.50, ("stdev: %.2f" % (deltas_np.std())),
horizontalalignment='left',verticalalignment='bottom',transform=ax.transAxes)
ax.hist(deltas_np, bins, [binL, binH])
# ax.axvline(np.round(deltas_np.mean()), linewidth=2, linestyle='--', color='g')
# ax.text(np.round(deltas_np.mean()), 0, "Mean",
# horizontalalignment='center',verticalalignment='top')
# ax.axvline(np.round(deltas_np.max()), linewidth=2, linestyle='--', color='r')
# ax.text(np.round(deltas_np.max()), 0, "Max",
# horizontalalignment='center',verticalalignment='top')
# ax.axvline(np.round(deltas_np.min()), linewidth=2, linestyle='--', color='c')
# ax.text(np.round(deltas_np.min()), 0, "Min",
# horizontalalignment='center',verticalalignment='top')
# #hist,bins = np.histogram(deltas_np, 50, [0,50])
# plt.hist(deltas_np,bins,[0,bins])
# plt.ylim([ 0, max + 2 ])
# plt.text(bins*.7, max*.8, ("min=%.f" % (np.round(deltas_np.min()))), fontsize=14)
# plt.text(bins*.7, max*.8, ("max=%.f" % (np.round(deltas_np.max()))), fontsize=14)
# plt.text(bins*.7, max*.6, ("mean=%.f" % (np.round(deltas_np.mean()))), fontsize=14)
# plt.text(bins*.7, max*.5, ("stdev=%.f" % (np.round(deltas_np.std()))), fontsize=14)
# #plt.bar(hist)
# #plt.xlim([ bins[0], bins[-1] ])
# plt.title('Histogram: %s' % k)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
if __name__ == "__main__":
'''
parseSSLog.py -i file_to_parse
'''
parser = argparse.ArgumentParser(description='open process log file, parse it according to parse function')
parser.add_argument('-i', '--in_file', dest='in_file', type=str,
help='input file...if not specified then use stdin')
parser.add_argument('-o', '--output_path', dest='output_path', type=str,
help='output path...if not specified then will use /tmp', default='/tmp')
parser.add_argument('-p', '--pdf_path', dest='pdf_path', type=str,
help='pdf path', default='/home/trafferty/ScorpionDLL_FuncCallTiming_Hists.pdf')
args = parser.parse_args()
if args.in_file:
parseSSLog(args.in_file, args.output_path, args.pdf_path)
else:
parser.print_help()
sys.exit(1)
| gpl-2.0 |
mhdella/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
poryfly/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8 bits integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
neuroidss/nupic.research | projects/l2_pooling/multi_column_synapse_sampling.py | 4 | 17170 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This evaluates the effect of synapse sampling on the feedforward and lateral
connections of L2. Specifically, how low can we go with L2 activation threshold,
number of distal synapses and number of proximal synapses while still getting
reliable performance.
We consider the problem of multi-column convergence.
"""
import random
import os
import pprint
import numpy as np
import pandas as pd
import cPickle
from multiprocessing import Pool
import matplotlib.pyplot as plt
plt.ion()
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def getL4Params():
"""
Returns a good default set of parameters to use in the L4 region.
"""
return {
"columnCount": 2048,
"cellsPerColumn": 8,
"learn": True,
"learnOnOneCell": False,
"initialPermanence": 0.51,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
"minThreshold": 10,
"basalPredictedSegmentDecrement": 0.002,
"activationThreshold": 13,
"sampleSize": 20,
"implementation": "ApicalTiebreakCPP",
"seed": 41
}
def getL2Params():
"""
  Returns a good default set of parameters to use in the L2 region.
"""
return {
"inputWidth": 2048 * 8,
"cellCount": 4096,
"sdrSize": 40,
"synPermProximalInc": 0.1,
"synPermProximalDec": 0.001,
"initialProximalPermanence": 0.6,
"minThresholdProximal": 1,
"sampleSizeProximal": 20,
"connectedPermanenceProximal": 0.5,
"synPermDistalInc": 0.1,
"synPermDistalDec": 0.001,
"initialDistalPermanence": 0.41,
"activationThresholdDistal": 13,
"sampleSizeDistal": 20,
"connectedPermanenceDistal": 0.5,
"seed": 41,
"learningMode": True,
}
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param noiseLevel (float) Noise level to add to the locations and features
during inference. Default: None
@param profile (bool) If True, the network will be profiled after
learning and inference. Default: False
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
The method returns the args dict updated with two additional keys:
convergencePoint (int) The average number of iterations it took
to converge across all objects
objects (pairs) The list of objects we trained on
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
profile = args.get("profile", False)
noiseLevel = args.get("noiseLevel", None) # TODO: implement this?
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
l2Params = args.get("l2Params", getL2Params())
l4Params = args.get("l4Params", getL4Params())
objectSeed = args.get("objectSeed", 41)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=1024,
externalInputSize=1024,
numCorticalColumns=numColumns,
seed=objectSeed,
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
L2Overrides=l2Params,
L4Overrides=l4Params,
numCorticalColumns=numColumns,
seed=trialNum
)
exp.learnObjects(objects.provideObjectsToLearn())
L2TimeLearn = 0
L2TimeInfer = 0
if profile:
# exp.printProfile(reset=True)
L2TimeLearn = getProfileInfo(exp)
args.update({"L2TimeLearn": L2TimeLearn})
exp.resetProfile()
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for 3 time steps to let it settle and ensure it
# converges.
for objectId in objects:
obj = objects[objectId]
# Create sequence of sensations for this object for all columns
objectSensations = {}
for c in range(numColumns):
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
# stay multiple steps on each sensation
sensations = []
for pair in objectCopy:
for _ in xrange(2):
sensations.append(pair)
objectSensations[c] = sensations
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations
}
exp.infer(objects.provideObjectToInfer(inferConfig), objectName=objectId)
if profile:
L2TimeInfer += getProfileInfo(exp)
exp.resetProfile()
# exp.printProfile(reset=True)
if profile:
L2TimeInfer /= len(objects)
args.update({"L2TimeInfer": L2TimeInfer})
convergencePoint, _ = exp.averageConvergencePoint("L2 Representation", 40, 40)
print "objectSeed {} # distal syn {} # proximal syn {}, " \
"# convergence point={:4.2f} train time {:4.3f} infer time {:4.3f}".format(
objectSeed,
l2Params["sampleSizeDistal"],
l2Params["sampleSizeProximal"],
convergencePoint, L2TimeLearn, L2TimeInfer)
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint": convergencePoint})
# prepare experiment results
numLateralConnections = []
numProximalConnections = []
for l2Columns in exp.L2Columns:
numLateralConnections.append(
l2Columns._pooler.numberOfDistalSynapses())
numProximalConnections.append(
np.sum(l2Columns._pooler.numberOfProximalSynapses()))
result = {
'trial': objectSeed,
'L2TimeLearn': args['L2TimeLearn'],
'L2TimeInfer': args['L2TimeInfer'],
'sampleSizeProximal': l2Params["sampleSizeProximal"],
'sampleSizeDistal': l2Params["sampleSizeDistal"],
'numLateralConnections': np.mean(np.array(numLateralConnections)),
'numProximalConnections': np.mean(np.array(numProximalConnections)),
'convergencePoint': args['convergencePoint']}
return result
def getProfileInfo(exp):
"""
Prints profiling information.
Parameters:
----------------------------
@param reset (bool)
If set to True, the profiling will be reset.
"""
totalTime = 0.000001
for region in exp.network.regions.values():
timer = region.getComputeTimer()
totalTime += timer.getElapsed()
# Sort the region names
regionNames = list(exp.network.regions.keys())
regionNames.sort()
count = 1
profileInfo = []
L2Time = 0.0
L4Time = 0.0
for regionName in regionNames:
region = exp.network.regions[regionName]
timer = region.getComputeTimer()
count = max(timer.getStartCount(), count)
profileInfo.append([region.name,
timer.getStartCount(),
timer.getElapsed(),
100.0 * timer.getElapsed() / totalTime,
timer.getElapsed() / max(timer.getStartCount(), 1)])
if "L2Column" in regionName:
L2Time += timer.getElapsed()
elif "L4Column" in regionName:
L4Time += timer.getElapsed()
return L2Time
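# Editorial note: only the accumulated L2 compute time is returned; profileInfo
# and L4Time are collected above but not consumed by the callers in this
# script. Typical use, as in runExperiment:
#
#   L2TimeLearn = getProfileInfo(exp)  # seconds spent in the L2 columns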
def experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList):
"""
Test multi-column convergence with varying amount of proximal/distal sampling
:return:
"""
numRpts = 20
df = None
args = []
for sampleSizeProximal in sampleSizeProximalList:
for sampleSizeDistal in sampleSizeDistalList:
for rpt in range(numRpts):
l4Params = getL4Params()
l2Params = getL2Params()
l2Params["sampleSizeProximal"] = sampleSizeProximal
l2Params["minThresholdProximal"] = sampleSizeProximal
l2Params["sampleSizeDistal"] = sampleSizeDistal
l2Params["activationThresholdDistal"] = sampleSizeDistal
args.append(
{
"numObjects": expParams['numObjects'],
"numLocations": expParams['numLocations'],
"numFeatures": expParams['numFeatures'],
"numColumns": expParams['numColumns'],
"trialNum": rpt,
"l4Params": l4Params,
"l2Params": l2Params,
"profile": True,
"objectSeed": rpt,
}
)
pool = Pool(processes=expParams['numWorkers'])
result = pool.map(runExperiment, args)
#
# if df is None:
# df = pd.DataFrame.from_dict(result, orient='index')
# else:
# df = pd.concat([df, pd.DataFrame.from_dict(result, orient='index')], axis=1)
#
# df = df.transpose()
return result
def convertResultsToDataFrames(results):
  """
  Convert the list of per-trial result dicts produced by the experiments into
  a single pandas DataFrame with one row per trial.
  """
  df = None
for result in results:
if df is None:
df = pd.DataFrame.from_dict(result, orient='index')
else:
df = pd.concat([df, pd.DataFrame.from_dict(result, orient='index')], axis=1)
df = df.transpose()
return df
def experimentVaryingDistalSynapseNumber(expParams):
  """
  Fix proximal synapse sampling, varying distal synapse sampling
  :param expParams:
  :return:
  """
sampleSizeDistalList = [2, 3, 4, 5, 6, 8, 10, 15, 20]
sampleSizeProximalList = [5]
result = experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList)
resultsName = './results/multi_column_distal_sampling_' \
'numFeature_{}_numColumn_{}'.format(expParams['numFeatures'],
expParams['numColumns'])
with open(resultsName,"wb") as f:
cPickle.dump(result, f)
return result
def experimentVaryingProximalSynapseNumber(expParams):
"""
Fix distal synapse sampling, varying proximal synapse sampling
:param expParams:
:return:
"""
sampleSizeDistalList = [5]
sampleSizeProximalList = [1, 2, 3, 4, 5, 6, 8, 10, 15]
result = experimentVaryingSynapseSampling(expParams,
sampleSizeDistalList,
sampleSizeProximalList)
resultsName = './results/multi_column_proximal_sampling_' \
'numFeature_{}_numColumn_{}'.format(expParams['numFeatures'],
expParams['numColumns'])
  with open(resultsName, "wb") as f:
    cPickle.dump(result, f)
  return result
def plotDistalSynSamplingResult(expParams):
  """
  Plot convergence speed, lateral connection counts and L2 run times as a
  function of the distal sample size, for 3-, 5- and 7-column networks.
  """
  fig, ax = plt.subplots(2, 2)
legends =[]
for numColumns in [3, 5, 7]:
resultsName = './results/multi_column_distal_sampling_' \
'numFeature_{}_numColumn_{}'.format(expParams['numFeatures'],
numColumns)
with open(resultsName, "rb") as f:
results = cPickle.load(f)
df = convertResultsToDataFrames(results)
l2LearnTimeList = []
l2InferTimeList = []
convergencePointList =[]
numLateralConnectionsList = []
sampleSizeDistalList = np.sort(np.unique(df['sampleSizeDistal']))
sampleSizeProximalList = np.sort(np.unique(df['sampleSizeProximal']))
for sampleSizeDistal in sampleSizeDistalList:
idx = np.where(np.logical_and(
df['sampleSizeDistal'] == sampleSizeDistal,
df['sampleSizeProximal'] == sampleSizeProximalList[0]))[0]
l2LearnTimeList.append(np.mean(df['L2TimeLearn'].iloc[idx]))
l2InferTimeList.append(np.mean(df['L2TimeInfer'].iloc[idx]))
convergencePointList.append(np.mean(df['convergencePoint'].iloc[idx]))
numLateralConnectionsList.append(np.mean(df['numLateralConnections'].iloc[idx]))
ax[0, 0].plot(sampleSizeDistalList, convergencePointList, '-o',
label='numColumn_{}'.format(numColumns))
ax[0, 0].set_ylabel('# pts to converge')
ax[0, 0].set_xlabel('Distal sample size')
ax[0, 1].plot(sampleSizeDistalList, numLateralConnectionsList, '-o')
ax[0, 1].set_ylabel('# lateral connections / column')
ax[0, 1].set_xlabel('Distal sample size')
ax[1, 0].plot(sampleSizeDistalList, l2LearnTimeList, '-o')
ax[1, 0].set_ylabel('L2 training time (s)')
ax[1, 0].set_xlabel('Distal sample size')
ax[1, 1].plot(sampleSizeDistalList, l2InferTimeList, '-o')
ax[1, 1].set_ylabel('L2 infer time (s)')
ax[1, 1].set_xlabel('Distal sample size')
legends.append('{}-column'.format(numColumns))
plt.tight_layout()
ax[0, 0].set_title('distal synapse sampling')
plt.legend(legends)
plt.savefig('plots/L2PoolingDistalSynapseSampling.pdf')
def plotProximalSynSamplingResult(expParams):
  """
  Plot convergence speed, proximal connection counts and L2 run times as a
  function of the proximal sample size, for 3-, 5- and 7-column networks.
  """
  fig, ax = plt.subplots(2, 2)
legends =[]
for numColumns in [3, 5, 7]:
resultsName = './results/multi_column_proximal_sampling_' \
'numFeature_{}_numColumn_{}'.format(expParams['numFeatures'],
numColumns)
with open(resultsName, "rb") as f:
results = cPickle.load(f)
df = convertResultsToDataFrames(results)
l2LearnTimeList = []
l2InferTimeList = []
convergencePointList =[]
numLateralConnectionsList = []
numProximalConnectionsList = []
sampleSizeDistalList = np.sort(np.unique(df['sampleSizeDistal']))
sampleSizeProximalList = np.sort(np.unique(df['sampleSizeProximal']))
for sampleSizeProximal in sampleSizeProximalList:
idx = np.where(np.logical_and(
df['sampleSizeDistal'] == sampleSizeDistalList[0],
df['sampleSizeProximal'] == sampleSizeProximal))[0]
l2LearnTimeList.append(np.mean(df['L2TimeLearn'].iloc[idx]))
l2InferTimeList.append(np.mean(df['L2TimeInfer'].iloc[idx]))
convergencePointList.append(np.mean(df['convergencePoint'].iloc[idx]))
numProximalConnectionsList.append(np.mean(df['numProximalConnections'].iloc[idx]))
ax[0, 0].plot(sampleSizeProximalList, convergencePointList, '-o',
label='numColumn_{}'.format(numColumns))
ax[0, 0].set_ylabel('# pts to converge')
ax[0, 0].set_xlabel('Proximal sample size')
ax[0, 1].plot(sampleSizeProximalList, numProximalConnectionsList, '-o')
ax[0, 1].set_ylabel('# proximal connections / column')
ax[0, 1].set_xlabel('Proximal sample size')
ax[1, 0].plot(sampleSizeProximalList, l2LearnTimeList, '-o')
ax[1, 0].set_ylabel('L2 training time (s)')
ax[1, 0].set_xlabel('Proximal sample size')
ax[1, 1].plot(sampleSizeProximalList, l2InferTimeList, '-o')
ax[1, 1].set_ylabel('L2 infer time (s)')
ax[1, 1].set_xlabel('Proximal sample size')
legends.append('{}-column'.format(numColumns))
plt.tight_layout()
ax[0, 0].set_title('proximal synapse sampling')
plt.legend(legends)
plt.savefig('plots/L2PoolingProximalSynapseSampling.pdf')
if __name__ == "__main__":
expParams = {
"numObjects": 10,
"numLocations": 10,
"numFeatures": 3,
"numColumns": 3,
'numWorkers': 6,
}
for numColumns in [3, 5, 7]:
expParams['numColumns'] = numColumns
# Fixed number of proximal synapses, varying distal synapse sampling
result = experimentVaryingDistalSynapseNumber(expParams)
    # Fixed number of distal synapses, varying proximal synapse sampling
result = experimentVaryingProximalSynapseNumber(expParams)
  plotDistalSynSamplingResult(expParams)
  plotProximalSynSamplingResult(expParams)
| agpl-3.0 |
akrherz/idep | scripts/gridorder/flowpath_yearly.py | 2 | 2401 | import glob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import sys
GRIDORDER = sys.argv[1]
def summarize():
res = []
for fn in glob.glob("dfs/*.csv"):
df = pd.read_csv(fn)
sdf = df.groupby("flowpath").sum() / 10.0
adf = df[["flowpath", "length"]].groupby("flowpath").mean()
for fp, row in sdf.iterrows():
length = adf.at[fp, "length"]
res.append(
dict(
flowpath=fp,
length=length,
avg_det=(row["av_det"] * 4.463),
runoff=row["runoff"],
delivery=(row["delivery"] * 4.463),
)
)
df = pd.DataFrame(res)
df.to_csv("flowpaths%s.csv" % (GRIDORDER,))
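# Assumed input layout (inferred from the columns used above, not from project
# docs): each dfs/*.csv holds yearly per-flowpath rows such as
#
#   flowpath,length,av_det,runoff,delivery
#   1234,210.5,0.8,12.3,0.4
#
# summarize() then writes one aggregated row per flowpath to
# flowpaths<GRIDORDER>.csv with columns flowpath, length, avg_det, runoff and
# delivery.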
def plot1(df):
(fig, ax) = plt.subplots(1, 1)
plt.hist2d(df["length"], df["avg_det"], bins=[160, 320], norm=LogNorm())
plt.colorbar(label="Flowpaths")
ax.set_ylabel("Soil Detachment [T/a per year]")
ax.set_xlabel("Flowpath Length [m]")
ax.set_xlim(0, 400)
ax.set_ylim(0, 50)
df["ilength"] = (df["length"] / 5.0).astype("i")
gdf = df.groupby("ilength").mean()
ax.plot(
gdf.index.values * 5.0,
gdf["avg_det"].values,
lw=2,
color="k",
label="Avg",
zorder=5,
)
ax.plot(
gdf.index.values * 5.0,
gdf["avg_det"].values,
lw=4,
color="w",
zorder=4,
)
ax.grid(True)
ax.set_title("Iowa DEP:: Yearly Avg Detachment by Flowpath Length")
ax.legend()
fig.savefig("test.png")
def main():
df = pd.read_csv("flowpaths%s.csv" % (GRIDORDER,))
x = []
y = []
y2 = []
for i in np.arange(0, 50, 0.5):
x.append(i)
y.append(df[df["length"] >= i]["delivery"].mean())
y2.append(df[df["length"] >= i]["avg_det"].mean())
(fig, ax) = plt.subplots(1, 1)
ax.plot(x, y, label="Delivery")
ax.plot(x, y2, label="Detachment")
ax.set_xlabel(
"Flowpath Length Floor [m], (average computed for len >= floor)"
)
ax.set_title("Iowa DEP: Yearly Averages by Truncated Flowpath Length")
ax.legend(loc="best")
ax.grid(True)
ax.set_ylabel("Soil Delivery or Detachment [T/a per year]")
fig.savefig("test.png")
if __name__ == "__main__":
summarize()
# main()
| mit |
starbuck10/CS109a_DataScience_UserRatings_Team_Project | Final Milestone/MovieLens/common.py | 1 | 3083 | import math
import time
from contextlib import contextmanager
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
@contextmanager
def elapsed_time(title):
start = time.time()
yield
elapsed = time.time() - start
print '%s: %.2f secs' % (title, elapsed)
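# Minimal usage sketch for the timing helper above (illustrative only):
#
#   with elapsed_time('score model'):
#       score_model(ratings_df, model_f, model_name='knn')
#
# which prints e.g. "score model: 12.34 secs" when the block finishes.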
def get_xy(ratings_df):
y = ratings_df['rating']
x = ratings_df.drop('rating', axis=1)
return x, y
def root_mean_squared_error(y, y_pred):
return math.sqrt(mean_squared_error(y, y_pred))
def show_scores_plot(k_neighbors_values, val_scores, train_scores, model_name):
_, ax = plt.subplots(1, 1, figsize=(15, 10))
ax.plot(k_neighbors_values, val_scores, label='validation')
ax.plot(k_neighbors_values, train_scores, label='train')
ax.set_xlabel('k_neighbors')
ax.set_ylabel('$R^2$')
ax.set_title('Test and validation scores for different k_neighbors values (%s)' % model_name)
ax.legend(loc='best')
plt.tight_layout()
plt.show()
def score_model(ratings_df, model_f, model_name):
    """Tune k_neighbors on a validation split, then refit and report train/test R^2 and RMSE."""
    train_val_ratings_df, test_ratings_df = train_test_split(ratings_df)
train_ratings_df, validation_ratings_df = train_test_split(train_val_ratings_df)
best_score = -float('inf')
best_k_neighbors = None
model = model_f()
model = model.fit(train_ratings_df)
k_neighbors_values = [1, 5, 10, 20, 30, 40, 50, 60, 80, 100]
val_scores = []
train_scores = []
for k_neighbors in k_neighbors_values:
model.set_k_neighbors(k_neighbors=k_neighbors)
x_train, y_train = get_xy(train_ratings_df)
x_val, y_val = get_xy(validation_ratings_df)
y_train_pred = model.predict(x_train)
y_val_pred = model.predict(x_val)
train_score = r2_score(y_train, y_train_pred)
val_score = r2_score(y_val, y_val_pred)
if val_score > best_score:
best_score = val_score
best_k_neighbors = k_neighbors
val_scores.append(val_score)
train_scores.append(train_score)
print 'k: %d, validation score: %.5f, train score: %.5f\n' % (k_neighbors, val_score, train_score)
print 'best k: %d, best score: %.5f' % (best_k_neighbors, best_score)
model = model_f(k_neighbors=best_k_neighbors)
model = model.fit(train_val_ratings_df)
x_train_val, y_train_val = get_xy(train_val_ratings_df)
x_test, y_test = get_xy(test_ratings_df)
y_train_val_pred = model.predict(x_train_val)
y_test_pred = model.predict(x_test)
train_val_score = r2_score(y_train_val, y_train_val_pred)
test_score = r2_score(y_test, y_test_pred)
train_val_rmse = root_mean_squared_error(y_train_val, y_train_val_pred)
test_rmse = root_mean_squared_error(y_test, y_test_pred)
print 'train score: %.4f, test score: %.4f' % (train_val_score, test_score)
print 'train rmse: %.4f, test rmse: %.4f' % (train_val_rmse, test_rmse)
show_scores_plot(k_neighbors_values, val_scores, train_scores, model_name=model_name)
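# Hypothetical driver for score_model (a sketch, not part of this module). The
# model factory must accept an optional k_neighbors keyword and return an
# object exposing fit(ratings_df), set_k_neighbors(k_neighbors=...) and
# predict(x); ratings_df must contain a 'rating' column plus feature columns,
# as assumed by get_xy above.
#
#   # from knn_model import KnnRatingModel              # hypothetical import
#   # score_model(ratings_df, KnnRatingModel, model_name='user-based KNN')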
| mit |
hainm/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
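# Illustration of the normalisation above (informal, not an official doctest):
# mldata_filename("Whistler Daily Snowfall") -> 'whistler-daily-snowfall'
# mldata_filename("datasets-UCI iris") -> 'datasets-uci-iris'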
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
3) the data array is stored as `n_features x n_samples` , and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
lenovor/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
jbornschein/mca-genmodel | examples/barstest-mmca.py | 1 | 2623 | #!/usr/bin/env python
#
# Author: Jorg Bornschein <[email protected]>
# License: Academic Free License (AFL) v3.0
#
from __future__ import division
import sys
sys.path.insert(0, '..')
import numpy as np
from mpi4py import MPI
from matplotlib import use
from pulp.utils.parallel import pprint
from pulp.utils.datalog import dlog, StoreToH5, TextPrinter
from pulp.visualize.gui import GUI, RFViewer, YTPlotter
from pulp.em import EM
from pulp.em.annealing import LinearAnnealing
from pulp.em.camodels.mmca_et import MMCA_ET
# Parameters
N = 1000
D2 = 5
D = D2**2
H = 2*D2
Hprime = 6
gamma = 3
Tsteps = 50
Tstart = 1.1
Tend = 1.1
#============================================================================
# Main
if __name__ == "__main__":
comm = MPI.COMM_WORLD
pprint("="*78)
pprint(" Running %d parallel processes" % comm.size)
pprint("="*78)
#Configure DataLogger
#use('GTKAgg')
dlog.start_gui(GUI)
#dlog.set_handler('freeEnergy', YTPlotter)
dlog.set_handler(('T', 'Qmean', 'pi', 'sigma', 'Wmin', 'Wmean', 'Wmax'), TextPrinter)
dlog.set_handler('W', RFViewer, rf_shape=(D2, D2), symmetric=1, global_maximum=1)
dlog.set_handler('y', RFViewer, rf_shape=(D2, D2))
dlog.set_handler(['pi'], YTPlotter)
dlog.set_handler(['sigma'], YTPlotter)
# Choose annealing schedule
anneal = LinearAnnealing(Tsteps)
anneal['T'] = [(10, Tstart) , (-20, Tend)]
anneal['Ncut_factor'] = [(0, 0.), (2./3, 1.2)]
anneal['W_noise'] = [(-10, 0.01), (-1, 0.01)]
# Prepare ground-truth GFs (bars)
W_gt = np.zeros( (H, D2, D2) )
for i in xrange(D2):
W_gt[ i, i, :] = -10.
W_gt[D2+i, :, i] = +10.
W_gt = W_gt.reshape( (H, D) )
W_gt += np.random.normal(size=(H, D), scale=0.5)
# Prepare model...
model = MMCA_ET(D, H, Hprime, gamma)
gt_params = {
'W' : W_gt,
'pi' : 2./H,
'sigma' : 0.10
}
# Generate trainig data
my_N = N // comm.size
my_data = model.generate_data(gt_params, my_N)
dlog.append('y', my_data['y'][0:25,:])
# Initialize model parameters (to be learned)
params = {
# 'W' : W_gt,
'W' : np.random.normal(size=W_gt.shape),
'pi' : 1/H,
'sigma' : 5.00
}
#params = model.noisify_params(params, anneal)
params = comm.bcast(params)
# Create and start EM annealing
em = EM(model=model, anneal=anneal)
em.data = my_data
em.lparams = params
em.run()
dlog.close(True)
pprint("Done")
#print(em.lparams['W'])
| agpl-3.0 |
alvarofierroclavero/scikit-learn | sklearn/svm/classes.py | 13 | 40017 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
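# Minimal usage sketch for LinearSVC (illustrative only, not an official
# doctest; the fitted model depends on the data and the liblinear build):
#
#   from sklearn.svm import LinearSVC
#   from sklearn.datasets import make_classification
#   X, y = make_classification(n_features=4, random_state=0)
#   clf = LinearSVC(random_state=0).fit(X, y)
#   clf.predict([[0, 0, 0, 0]])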
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'l1' is the epsilon-insensitive loss
(standard SVR) while 'l2' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
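# Minimal usage sketch for LinearSVR (illustrative only; results depend on the
# data and the liblinear build):
#
#   from sklearn.svm import LinearSVR
#   from sklearn.datasets import make_regression
#   X, y = make_regression(n_features=4, random_state=0)
#   reg = LinearSVR(random_state=0).fit(X, y)
#   reg.predict([[0, 0, 0, 0]])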
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
        (n_samples, n_classes) as all other classifiers, or the original
        one-vs-one ('ovo') decision function of libsvm which has shape
        (n_samples, n_classes * (n_classes - 1) / 2).
        The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
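# Hedged usage sketch (added for illustration; not part of the original
# scikit-learn source). It exercises the OneClassSVM API documented above:
# ``fit`` on training data, then ``predict``/``decision_function`` on new
# points. The toy values and the helper name are arbitrary assumptions.
def _one_class_svm_example():
    import numpy as np
    train = np.array([[0.0], [0.3], [0.5], [0.4]])
    new = np.array([[0.1], [8.0]])
    clf = OneClassSVM(nu=0.2, kernel="rbf", gamma=0.5)
    clf.fit(train)
    labels = clf.predict(new)            # +1 for inliers, -1 for outliers
    scores = clf.decision_function(new)  # signed distance to the boundary
    return labels, scores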
| bsd-3-clause |
UNR-AERIAL/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
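# Hedged usage sketch (added; not part of the original benchmark script):
# calling ``benchmark`` programmatically instead of through the CLI below.
# The metric/format names come from the METRICS and FORMATS dicts above;
# the problem sizes and the helper name are arbitrary assumptions.
def _example_benchmark_call():
    times = benchmark(metrics=[METRICS['f1'], METRICS['hamming']],
                      formats=[FORMATS['dense'], FORMATS['csr']],
                      samples=2000, classes=8, density=.2, n_times=3)
    # times has shape (n_metrics, n_formats, 1, 1, 1) == (2, 2, 1, 1, 1)
    return times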
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e495.py | 2 | 6880 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1, 2, 3, 4, 5],
validation_buildings=[1, 2, 3, 4, 5],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True,
subsample_target=8,
ignore_incomplete=True,
allow_incomplete=True,
include_all=True,
skip_probability=0.25,
offset_probability=1
# ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
target_seq_length = seq_length // source.subsample_target
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': (target_seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, target_seq_length - 3, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
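# Architecture note (added commentary, not in the original script): exp_a
# stacks a 1D convolution front end (NUM_FILTERS filters of length 4 with
# 'valid' padding, hence the "seq_length - 3" sizes), three dense layers
# (expand, a 128-unit bottleneck, then re-expand to the 8x-subsampled
# length), a reshape back to (batch, time, filters), and a DeConv1DLayer
# with 'full' padding that produces one output channel per target timestep.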
def main():
APPLIANCES = [
('a', ['fridge freezer', 'fridge'], 512),
('b', "'coffee maker'", 512),
('c', "'dish washer'", 2000),
('d', "'hair dryer'", 256),
('e', "'kettle'", 256),
('f', "'oven'", 2000),
('g', "'toaster'", 256),
('h', "'light'", 2000),
('i', ['washer dryer', 'washing machine'], 1504)
]
for experiment, appliance, seq_length in APPLIANCES:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=5000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e495.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
fabianp/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(float)  # np.float is deprecated/removed in recent NumPy
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
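# Added note (not in the original example): model() is the logistic sigmoid
# applied to the fitted linear decision function; essentially the same curve
# should be obtainable with clf.predict_proba(X_test[:, np.newaxis])[:, 1],
# which is LogisticRegression's built-in way of producing probabilities.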
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
VariationalResearch/Polaron | firstwf.py | 1 | 1856 | from polrabi.wf1 import *
import matplotlib
import matplotlib.pyplot as plt
from scipy.integrate import trapz, simps, cumtrapz
# # Initialization
matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
gBB = 0.05
mI = 10
mB = 1
n0 = 1
# # Rabi oscillation
# Omega = 0.1
# w_rot = 0
# aIBi_up = 30
# aIBi_down = 100
# a0 = np.array([[0.0], [1.0]], dtype=complex)
# tVals = np.linspace(0, 100, 1e3)
# a = np.zeros((2, tVals.size), dtype=complex)
# for idt, t in enumerate(tVals):
# temp = wfcoeff(t, a0, aIBi_up, aIBi_down, Omega, w_rot, gBB, mI, mB, n0)
# a[[0, 1], idt] = temp.reshape((2,))
# figR, axR = plt.subplots()
# axR.plot(tVals, abs(a[0, :])**2, 'k-')
# # ax.set_ylim([-50, 50])
# axR.set_xlabel('Time ($t$)')
# axR.set_ylabel(r'$P_{\uparrow}$')
# axR.set_title(r'Probability of being in $\uparrow$ state')
# # plt.show()
# # Atom transfer peak
Omega = 0.1
ts = np.pi / (2 * Omega)
aIBi_up = 100
aIBi_down = 100
a0 = np.array([[0.0], [1.0]], dtype=complex)
wVals = np.linspace(-15, 15, 1000)  # num must be an integer for np.linspace
a = np.zeros((2, wVals.size), dtype=complex)
for idw, w in enumerate(wVals):
temp = wfcoeff(ts, a0, aIBi_up, aIBi_down, Omega, w, gBB, mI, mB, n0)
a[[0, 1], idw] = temp.reshape((2,))
figA, axA = plt.subplots()
axA.plot(wVals, np.abs(a[0, :])**2, 'k-')
# ax.set_ylim([-50, 50])
axA.set_xlabel(r'Pumping frequency ($\omega$)')
axA.set_ylabel(r'$P_{\uparrow}$')
axA.set_title(r'Probability of being in $\uparrow$ state')
p_up = np.abs(a[0, :])**2
p_down = np.abs(a[1, :])**2
mask = p_up > 0.1
print(trapz(p_up, x=wVals))
# print(p_up[mask])
# c = cumtrapz(p_up, x=wVals)
# print(c[mask[0:-1]])
# H = Hspin(aIBi_up, aIBi_down, Omega, w_rot, gBB, mI, mB, n0)
gam = 0.1
w21 = 0
def sakurai(w, t):
return gam**2 / (gam**2 + (w - w21)**2 / 4) * np.sin(np.sqrt(gam**2 + (w - w21)**2 / 4) * t)**2
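# Added note: sakurai() is the textbook two-level (Rabi) transition
# probability,
#   P(w, t) = gam**2 / (gam**2 + (w - w21)**2 / 4)
#             * sin(sqrt(gam**2 + (w - w21)**2 / 4) * t)**2,
# with gam playing the role of the coupling strength and w21 the transition
# frequency; it is defined for comparison with the full calculation above
# but is not plotted here.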
plt.show()
| mit |
blaze/distributed | distributed/tests/test_client.py | 1 | 165917 | import asyncio
from collections import deque
from contextlib import suppress
from functools import partial
import gc
import logging
from operator import add
import os
import pickle
import psutil
import random
import subprocess
import sys
import threading
from threading import Semaphore
from time import sleep
import traceback
import warnings
import weakref
import zipfile
import pytest
from tlz import identity, isdistinct, concat, pluck, valmap, first, merge
import dask
from dask import delayed
from dask.optimization import SubgraphCallable
import dask.bag as db
from distributed import (
Worker,
Nanny,
fire_and_forget,
LocalCluster,
get_client,
secede,
get_worker,
Executor,
profile,
performance_report,
TimeoutError,
CancelledError,
)
from distributed.comm import CommClosedError
from distributed.client import (
Client,
Future,
wait,
as_completed,
tokenize,
_get_global_client,
default_client,
futures_of,
temp_default_client,
)
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import (
mp_context,
sync,
tmp_text,
tokey,
tmpfile,
is_valid_xml,
)
from distributed.utils_test import (
cluster,
slowinc,
slowadd,
slowdec,
randominc,
inc,
dec,
div,
throws,
geninc,
asyncinc,
gen_cluster,
gen_test,
double,
popen,
captured_logger,
varying,
map_varying,
wait_for,
async_wait_for,
pristine_loop,
save_sys_modules,
)
from distributed.utils_test import ( # noqa: F401
client as c,
client_secondary as c2,
cleanup,
cluster_fixture,
loop,
loop_in_thread,
nodebug,
s,
a,
b,
)
@gen_cluster(client=True, timeout=None)
async def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = await x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = await z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
async def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = await L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = await L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = await total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = await L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = await c.gather(L4)
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = await c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = await c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = await c.gather(L1)
assert results == []
@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert await x == 2
assert await y == 4
assert await z == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert await x == 2
assert await y == 4
with pytest.raises(ZeroDivisionError, match="eight"):
await z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 4
with pytest.raises(ZeroDivisionError, match="seven"):
await z
@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
result = c.map(inc, range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(1, 101))
result = c.map(add, range(100), range(100), batch_size=10)
result = await c.gather(result)
assert result == list(range(0, 200, 2))
# mismatch shape
result = c.map(add, range(100, 200), range(10), batch_size=2)
result = await c.gather(result)
assert result == list(range(100, 120, 2))
@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
await x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert await x == 3
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y = [delayed(varying(args))() for args in (xargs, yargs)]
x, y = c.compute([x, y], retries={x: 2})
gc.collect()
assert await x == 30
with pytest.raises(ZeroDivisionError, match="five"):
await y
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.compute([x, y, z], retries={(y, z): 2})
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
await fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert await fut == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
await x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert await x == 3
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.persist([x, y, z], retries={(y, z): 2})
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
await x
assert await y == 70
assert await z == 80
@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = await future
assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
pd = pytest.importorskip("pandas")
x = c.submit(inc, 10)
y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
await x
await y
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
assert "int" in func(x)
assert "pandas" in func(y)
assert "DataFrame" in func(y)
@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = await x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = await x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
await x
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
await asyncio.sleep(0)
assert not c.futures
x = c.submit(div, 1, 0)
await x.exception()
x.release()
await asyncio.sleep(0)
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = await x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await x
x = c.submit(div, 10, 2) # continues to operate
result = await x
assert result == 10 / 2
@gen_cluster()
async def test_gc(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
await x
assert s.tasks[x.key].who_has
x.__del__()
await async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
await c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(TimeoutError):
x.result(timeout=0.01)
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
await asyncio.sleep(0)
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
await asyncio.sleep(0)
result = await z
assert result == 3
ykey = y.key
y.__del__()
await asyncio.sleep(0)
assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
[future] = await c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
await asyncio.sleep(0)
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
await asyncio.sleep(0.1)
@gen_cluster(timeout=1000, client=True)
async def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = await x
xkey = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
await asyncio.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = await x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
await asyncio.sleep(0)
w = c.submit(add, y, z)
result = await w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
await wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
await asyncio.sleep(0)
w.release_key(f.key)
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
await asyncio.sleep(0)
worker.release_key(datum.key)
result = await c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
await wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
await wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
await wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
L = c.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}])
await wait(L)
assert s.host_restrictions[L[0].key] == {a.ip}
assert s.host_restrictions[L[1].key] == {a.ip, b.ip}
assert s.host_restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
c.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
restrictions = {"y": {a.ip}, "z": {b.ip}}
futures = c.get(dsk, ["y", "z"], restrictions, sync=False)
result = await c.gather(futures)
assert result == [2, 3]
assert "y" in a.data
assert "z" in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
await z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
async def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
await wait(L)
await b.close()
assert b.address not in s.workers
result = await c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
await asyncio.sleep(0.01)
result = await c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = await x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = await x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = await z
assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = await c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = await c.gather(L)
assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
c = await Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = await x
f = await Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = await y
assert xx == yy
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = await c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = await c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = await c.gather(c.get(dsk, keys, sync=False))
assert list(result) == list(dask.get(dsk, keys))
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
d = await c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = await c.gather([d["y"]])
assert yy == [20]
[x] = await c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = await c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = await z
assert result == 10 + 20
result = await c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
d = await c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = await c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = await c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
x = await c.scatter(1)
assert isinstance(x, Future)
result = await x
assert result == 1
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
[a] = await c.scatter([1])
[b] = await c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj:
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = await c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = await c.scatter(x)
result = await future
assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
future = await c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):  # distinct name: covers the hash=False case
x = await c.scatter(123)
y = await c.scatter(123)
assert x.key == y.key
z = await c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
import gc
gc.collect()
start = time()
while c.refcount["x"]:
await asyncio.sleep(0.01)
assert time() < start + 2
def test_current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
await y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
await z
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
[x] = await c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
await y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
await c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
await z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
await f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
results = c.map(f, lists, [total] * 10)
await wait([total])
await wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = await c.get(dsk, ("x", 0), sync=False)
y = await c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
await c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
await c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
future = await c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = await future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = await future
assert not s.counters["op"].components[0]["gather"]
result = await c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = await c.scatter(x, direct=True)
result = await future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
future2 = await c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = await future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
futures = await c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = await c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
assert all(
    f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
    for f in futures
    for w in workers[:3]
)
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, TimeoutError)):
await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = await c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
futures = await c.scatter([1, 2, 3])
data = await c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
await wait(L)
assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = await x.traceback()
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
try:
await c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
await c.upload_file(fn)
x = c.submit(g, pure=False)
result = await x
assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
await c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = await x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
def g():
import package_1, package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
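    # Added note: in user code the same mechanism is simply
    #   client.upload_file("my_package.egg")   # .py and .zip files also work
    # after which tasks submitted to the cluster can import the uploaded
    # module on the workers.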
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
await c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = await x
assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
await c._upload_large_file(fn, remote_filename="x")
await c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
await c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
a = await Client(s.address, asynchronous=True)
b = await Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = await x
yy = await y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = await z
assert zz == 5
await a.close()
await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = await c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
d = await c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = await c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
x, y, z = await c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = await c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
async def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = await y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = await x
assert result == 1 + 1
result = await z
assert result == 1 + 1 + 1 + 2
A, B, C = await c.scatter([1, 2, 3])
AA, BB, xx = await c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
await x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
await x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
await x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
await wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, TimeoutError) as e:
assert "connect" in str(e).lower()
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
await x
except ValueError as e:
assert len(str(e)) < 100000
tb = await x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = await c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = await future2
assert result == 100 + 1 + 200
class BadlySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject:
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = await c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
@pytest.mark.skipif("True", reason="")
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "3" in text
assert "6" in text
assert "GB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
cluster = await LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = await Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
await client.close()
await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
await c.close()
start = time()
while c.id in s.wants_what:
await asyncio.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
await f.close()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2, s.tasks
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
await asyncio.sleep(0.01)
assert time() < start + 5
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
await c.close()
await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
x, y = await c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
x, y = await c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
d = await c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
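# Illustrative sketch (not part of the original suite): ``broadcast=True``
# replicates scattered data onto every worker, which is what the broadcast tests
# above verify.  ``scheduler_address`` is a placeholder.
def _example_broadcast(scheduler_address):
    with Client(scheduler_address) as client:
        x, y = client.scatter([1, 2], broadcast=True)
        return client.who_has([x, y])  # every worker address appears for both keys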
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
await asyncio.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
await c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
await c.close()
await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
start = time()
while s.tasks:
assert time() < start + 1
await asyncio.sleep(0.01)
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
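# Illustrative sketch (not part of the original suite): cancelling a future also
# cancels futures that depend on it, which the cancellation tests above wait for.
def _example_cancel(scheduler_address):
    with Client(scheduler_address) as client:
        x = client.submit(slowinc, 1)
        y = client.submit(inc, x)   # depends on x
        client.cancel([x])          # y becomes cancelled shortly afterwards
        return x.cancelled()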
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
await wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
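# Illustrative sketch (not part of the original suite): persist swaps graph nodes
# for Futures while keeping the collection type, as test_persist checks above.
# ``dask.array`` is an optional dependency here, just as in the tests.
def _example_persist(scheduler_address):
    import dask.array as da
    with Client(scheduler_address) as client:
        x = da.ones((10, 10), chunks=(5, 10))
        y = 2 * (x + 1)
        yy = client.persist(y)   # computation starts in the background
        return yy.compute()      # blocks until the persisted blocks are finished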
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
await c.cancel([x])
with pytest.raises(CancelledError):
await x
with pytest.raises(CancelledError):
await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
await wait([x])
x.__del__() # trigger garbage collection
await asyncio.sleep(0)
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
await asyncio.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_test()
async def test_worker_aliases():
s = await Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
c = await Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await c.close()
await asyncio.gather(a.close(), b.close(), w.close())
await s.close()
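# Illustrative sketch (not part of the original suite): workers started with a
# ``name=...`` can be targeted by that alias wherever a worker address is accepted,
# as test_worker_aliases shows.  ``scheduler_address`` and the alias "alice" are
# placeholders.
def _example_worker_alias(scheduler_address):
    with Client(scheduler_address) as client:
        future = client.submit(inc, 1, workers="alice")
        return future.result()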
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
await asyncio.sleep(0.5)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
async def test_startup_close_startup(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c = await Client(s.address, asynchronous=True)
await c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = await x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
@gen_cluster(client=True)
async def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = await c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
await c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
async def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = await e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
await e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
await e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
await c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await asyncio.sleep(0.1)
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_missing_data(c, s, a, b):
with pytest.raises(ValueError, match="keys were found to be missing"):
futures = await c.scatter(range(100))
keys = [f.key for f in futures]
del futures
await c.rebalance(keys)
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = await Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = await Nanny(s.address, nthreads=2, loop=s.loop, port=0)
await c.gather(futures)
await n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
await x
await c.cancel(x)
with pytest.raises(CancelledError):
c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
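# Illustrative sketch (not part of the original suite): replication driven from
# the client side, mirroring the scheduler-level ``replicate`` calls above.
def _example_replicate(scheduler_address):
    with Client(scheduler_address) as client:
        [x] = client.scatter([1])
        client.replicate([x], n=3)   # keep (up to) three copies across the cluster
        return client.who_has(x)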
class CountSerialization:
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = await c.scatter([obj])
await s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
await c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
await c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
await c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
async def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
await wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
await client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
await client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
await wait(future)
assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
await wait(futures)
assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
await wait([x, y])
futures = c.map(inc, range(2, 11))
await wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
await wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
await wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = await c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
await wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
await asyncio.sleep(0.01)
await c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
await asyncio.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
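# Illustrative sketch (not part of the original suite): with ``set_as_default=True``
# (the default) a client registers itself as dask's scheduler, so plain
# ``dask.base.get_scheduler()`` routes through it, as test_default_get asserts.
def _example_default_client(scheduler_address):
    with Client(scheduler_address, set_as_default=True) as client:
        return dask.base.get_scheduler() == client.get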
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
processing = await c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
await asyncio.sleep(0.2)
x = await c.processing()
assert set(x) == {a.address, b.address}
x = await c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
x = await c.scheduler.ncores()
assert x == s.nthreads
x = await c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = await c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = await c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = await c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = await c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = await c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = await c.scatter(3, workers=[v.address])
await wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
await u.close()
await v.close()
d = await c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = await c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = await c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = await c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
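# Illustrative sketch (not part of the original suite): the diagnostic queries
# exercised above, from the synchronous API.
def _example_diagnostics(scheduler_address):
    with Client(scheduler_address) as client:
        future = client.submit(inc, 1)
        wait([future])
        return client.who_has(future), client.has_what()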
@pytest.mark.slow
@gen_cluster(
client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with pytest.raises(KilledWorker) as info:
await f
assert info.value.last_worker.nanny in {a.address, b.address}
await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with suppress(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
async def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
await c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
await asyncio.sleep(0)
assert key not in c.refcount
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
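# Illustrative sketch (not part of the original suite): ``as_completed`` yields
# futures in completion order; with ``with_results=True`` it yields
# ``(future, result)`` pairs, as the tests above rely on.
def _example_as_completed(scheduler_address):
    with Client(scheduler_address) as client:
        futures = client.map(slowinc, range(5), delay=0.01)
        return [result for _, result in as_completed(futures, with_results=True)]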
@gen_test()
async def test_status():
s = await Scheduler(port=0)
c = await Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
await c.close()
assert c.status == "closed"
await s.close()
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
await wait(b4)
assert set(map(tokey, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
await wait(b4)
assert not any(tokey(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(TimeoutError):
await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
start = time()
while c.status != "closed":
await c._update_scheduler_info()
await asyncio.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.skipif(sys.version_info < (3, 7), reason="TODO: intermittent failures")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
async def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
await asyncio.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
await w
workers.add(w)
addr = w.worker_address
running[w] = addr
await asyncio.sleep(duration)
await w.close()
del w
await asyncio.sleep(0)
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
while len(workers) < count * repeat:
sleep(0.2)
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == "closed"
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
assert time() < start + 10
@gen_cluster(client=False, timeout=None)
async def test_idempotence(s, a, b):
c = await Client(s.address, asynchronous=True)
f = await Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
await x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
await y
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
await wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
await wait(b)
await asyncio.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
await wait([d, e])
assert len(s.transition_log) == len_single_submit
await c.close()
await f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
# smoke test: ``check=True`` above and the package query below should not raise
v = c.get_versions(packages=["requests"])
assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_async_get_versions(c, s, a, b):
await c.get_versions(check=True)
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
x = await e.scatter(1, workers=a.address)
await e.replicate(x, n=2)
await a.close()
await asyncio.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
[x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
await asyncio.sleep(0.1)
await a.close()
with pytest.raises(CancelledError):
await wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
x = await c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
await wait(z)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
await wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
await asyncio.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = await c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
await asyncio.sleep(0.1)
await a.close()
start = time()
while x.status == "finished":
await asyncio.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
async def test_serialize_future(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(lambda: 1)
result = await future
for ci in (c1, c2):
for ctxman in ci.as_current, lambda: temp_default_client(ci):
with ctxman():
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is ci
assert tokey(future2.key) in ci.futures
result2 = await future2
assert result == result2
await c1.close()
await c2.close()
@gen_cluster(client=False)
async def test_temp_default_client(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c1):
assert default_client() is c1
assert default_client(c2) is c2
with temp_default_client(c2):
assert default_client() is c2
assert default_client(c1) is c1
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
with temp_default_client(c):
assert Client.current() is c
with pytest.raises(ValueError):
Client.current(allow_global=False)
with c1.as_current():
assert Client.current() is c1
assert Client.current(allow_global=True) is c1
with c2.as_current():
assert Client.current() is c2
assert Client.current(allow_global=True) is c2
await c1.close()
await c2.close()
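# Illustrative sketch (not part of the original suite): ``as_current`` scopes the
# "current" client to a block without touching the global default client.
def _example_as_current(scheduler_address):
    with Client(scheduler_address, set_as_default=False) as client:
        with client.as_current():
            return Client.current() is client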
def test_as_current_is_thread_local(s):
l1 = threading.Lock()
l2 = threading.Lock()
l3 = threading.Lock()
l4 = threading.Lock()
l1.acquire()
l2.acquire()
l3.acquire()
l4.acquire()
def run1():
with Client(s.address) as c:
with c.as_current():
l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.acquire()
l4.release()
def run2():
with Client(s.address) as c:
with c.as_current():
l1.release()
l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
l4.acquire()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
@pytest.mark.xfail(
sys.version_info < (3, 7),
reason="Python 3.6 contextvars are not copied on Task creation",
)
@gen_cluster(client=False)
async def test_as_current_is_task_local(s, a, b):
l1 = asyncio.Lock()
l2 = asyncio.Lock()
l3 = asyncio.Lock()
l4 = asyncio.Lock()
await l1.acquire()
await l2.acquire()
await l3.acquire()
await l4.acquire()
async def run1():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
await l1.acquire()
l2.release()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
await l3.acquire()
l4.release()
async def run2():
async with Client(s.address, asynchronous=True) as c:
with c.as_current():
l1.release()
await l2.acquire()
try:
# This line runs only when both run1 and run2 are inside the
# context manager
assert Client.current(allow_global=False) is c
finally:
l3.release()
await l4.acquire()
await asyncio.gather(run1(), run2())
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers={
tuple(L1): a.address,
total: b.address,
tuple(L2): [c.address],
total2: b.address,
},
allow_other_workers=L2 + [total2],
)
await wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers={tuple(L1): a.address, total: b.address, tuple(L2): [c.address]},
allow_other_workers=L1 + [total],
)
await wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = await future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
def test_get_restrictions():
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
r1, loose = Client.get_restrictions(L2, "127.0.0.1", False)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert not loose
r1, loose = Client.get_restrictions(L2, ["127.0.0.1"], True)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert set(loose) == {d.key for d in L2}
r1, loose = Client.get_restrictions(L2, {total: "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
r1, loose = Client.get_restrictions(L2, {(total,): "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
[future] = await c.scatter([1])
assert future.type == int
d = await c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
await s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
futures = await c.scatter(list(range(100)))
await s.retire_workers(workers=[w.address for w in workers[:7]])
results = await c.gather(futures)
assert results == list(range(100))
while len(s.workers) != 3:
await asyncio.sleep(0.01)
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 3)] * 2,
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0, z=0):
sleep(0.01)
return x
y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = await c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
await wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = await c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
await wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
await wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
await asyncio.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = await c.compute(z)
result2 = await c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
await wait(yy)
start = time()
future = c.compute(y.sum())
await future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
await wait(z)
end = time()
assert end - start < 1
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
await asyncio.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
async def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
await asyncio.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
await c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
await asyncio.sleep(4)
try:
await s
except EnvironmentError: # port in use
await c.close()
return
start = time()
await c
try:
assert time() < start + 2
finally:
await c.close()
await s.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = await future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
futures = await c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = await c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = await c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
await asyncio.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
# args contains only solid values, not keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
await wait(f)
assert f.status == "error"
function, args, kwargs, deps = await c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = await c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = await c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = await c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = await c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
await c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != "closed":
await asyncio.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
class Foo:
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
class Foo:
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
await future
futures = c.map(inc, range(10))
results = await c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
await asyncio.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
await asyncio.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
await asyncio.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
c = await Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
await wait(future)
assert c.id in s.wants_what
await c.close()
start = time()
while c.id in s.wants_what or s.tasks:
await asyncio.sleep(0.01)
assert time() < start + 5
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = await c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = await future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
async def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = await c.submit(f)
assert result == 2
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
async def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
await asyncio.sleep(0.01)
assert threading.active_count() < count + 50
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = await c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
await asyncio.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = await c.scatter(ddf)
ddf2 = await future
df2 = await c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
uni_type = type("")
key = "inc-123"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
uni_type = type("")
key = "inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = await future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = await future2
assert result2 == 3
future3 = await c.scatter({"data-123": 123})
result3 = await future3["data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
async def f():
x = await c.scatter(123)
y = c.submit(inc, x)
z = await c.gather(y)
return z
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
await asyncio.sleep(0.1)
results = await asyncio.gather(
c.call_stack(future), c.call_stack(keys=[future.key])
)
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
w = a if future.key in a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing and not b.executing:
await asyncio.sleep(0.01)
result = await c.call_stack()
w = a if a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
await asyncio.sleep(0.001)
result = await c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
async def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
await wait(futures)
x = await c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = await c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = await c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
async def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
await wait(x + y)
xp = await c.profile("slowinc")
yp = await c.profile("slowdec")
p = await c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = await c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = await Client(s.address, asynchronous=True, name="foo")
assert "foo" in client.id
await client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
client = await Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
await asyncio.sleep(0.01)
assert time() < start + 1
await client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = await future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
await c.set_metadata("x", 1)
result = await c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
await wait(future)
await c.set_metadata(key, 123)
result = await c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(KeyError):
await c.get_metadata(key)
result = await c.get_metadata(key, None)
assert result is None
await c.set_metadata(["x", "a"], 1)
result = await c.get_metadata("x")
assert result == {"a": 1}
await c.set_metadata(["x", "b"], 2)
result = await c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = await c.get_metadata(["x", "a"])
assert result == 1
await c.set_metadata(["x", "a", "c", "d"], 1)
result = await c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
await wait(c.map(inc, range(5)))
logs = await c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = await c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = await c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = await future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = await Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster()
async def test_scatter_direct(s, a, b):
c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
start = time()
while s.clients[c.id].last_seen == last:
await asyncio.sleep(0.10)
assert time() < start + 5
await c.close()
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = await c.submit(d.get, "a")
assert result == 1
@gen_cluster()
async def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = await Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
await c.close()
def test_client_doesnt_close_given_loop(loop, s, a, b):
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
await s.close()
await c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
monkeypatch.setenv("USER", "myusername")
with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
link = "http://foo-myusername:12355/status"
assert link == c.dashboard_link
text = c._repr_html_()
assert link in text
@pytest.mark.asyncio
async def test_dashboard_link_inproc(cleanup):
async with Client(processes=False, asynchronous=True) as c:
with dask.config.set({"distributed.dashboard.link": "{host}"}):
assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
stop = time()
assert c.status == "closed"
await c.close()
assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
import tornado.web
import tornado.httpserver
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
await c
await c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
async def test(s, a, b):
import numpy as np
async with Client(
s.address, asynchronous=True, serializers=["dask", "msgpack"]
) as c:
assert (await c.submit(inc, 1)) == 2
await c.submit(np.ones, 5)
await c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = await c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
await wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
await c.gather(future, direct=direct)
# Run works
result = await c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = await c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
await c.run(lambda: inc)
with pytest.raises(TypeError):
await c.run_on_scheduler(lambda: inc)
test()
@gen_cluster()
async def test_de_serialization(s, a, b):
import numpy as np
c = await Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
import numpy as np
c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = await c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = await future
finally:
await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
c = await Client(s.address, asynchronous=True)
await c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
await wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[tokey(fx.key)].priority < s.tasks[tokey(fy.key)].priority
)
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = await c.scatter(1)
y = c.submit(bad_fn, x)
del x
await wait(y)
assert y.status == "error"
await asyncio.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
async def test_direct_async(s, a, b):
c = await Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
await c.close()
c = await Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
await c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
c1 = await Client(s.address, asynchronous=True)
c2 = await Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
assert not c2.futures # Don't create Futures on second Client
await c1.close()
await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (await future) == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
futures = await asyncio.gather(*[c.scatter(1, direct=True) for _ in range(5)])
x = await futures[0]
x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
await asyncio.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
await future
with dask.config.set(foo=True):
await future.retry()
await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
await y.retry()
await x.retry()
result = await y
assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
await y
with dask.config.set(foo=100):
await y.retry()
result = await y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
await c.gather(c.map(slowinc, range(10), delay=0.2))
state, figure = await c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
await c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = await future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = await c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = await c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
await asyncio.sleep(0.22) # 2 chances
assert not future.done()
w = await Worker(s.address)
start = time()
await future
assert time() < start + 1
await w.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
@pytest.mark.asyncio
async def test_dashboard_link_cluster(cleanup):
class MyCluster(LocalCluster):
@property
def dashboard_link(self):
return "http://foo.com"
async with MyCluster(processes=False, asynchronous=True) as cluster:
async with Client(cluster, asynchronous=True) as client:
assert "http://foo.com" in client._repr_html_()
@pytest.mark.asyncio
async def test_shutdown(cleanup):
async with Scheduler(port=0) as s:
async with Worker(s.address) as w:
async with Client(s.address, asynchronous=True) as c:
await c.shutdown()
assert s.status == "closed"
assert w.status == "closed"
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
async with LocalCluster(n_workers=1, asynchronous=True, processes=False) as lc:
async with Client(lc, asynchronous=True) as c:
await c.shutdown()
assert lc.scheduler.status == "closed"
@pytest.mark.asyncio
async def test_config_inherited_by_subprocess(cleanup):
def f(x):
return dask.config.get("foo") + 1
with dask.config.set(foo=100):
async with LocalCluster(n_workers=1, asynchronous=True, processes=True) as lc:
async with Client(lc, asynchronous=True) as c:
result = await c.submit(f, 1)
assert result == 101
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
pytest.importorskip("dask.dataframe")
df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
futures = futures_of(df)
for k, f in zip(df.__dask_keys__(), futures):
assert str(k) in str(f)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
for i in range(5):
try:
x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
await wait(x)
await asyncio.gather(
c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
)
p = await c.profile(server=True) # All worker servers
assert "slowinc" in str(p)
p = await c.profile(scheduler=True) # Scheduler
assert "slowdec" in str(p)
except AssertionError:
if i == 4:
raise
else:
pass
else:
break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
future = c.submit(inc, 1)
async def f(): # flake8: noqa
result = await future
assert result == 2
await f()
future = c.submit(div, 1, 0)
async def f():
with pytest.raises(ZeroDivisionError):
await future
await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures)
results = []
async def f():
async for future in ac:
result = await future
results.append(result)
await f()
assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
futures = c.map(inc, range(10))
ac = as_completed(futures, with_results=True)
results = []
async def f():
async for future, result in ac:
results.append(result)
await f()
assert set(results) == set(range(1, 11))
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(sleep, 0.3)
ac = as_completed([x, y])
async def _():
await asyncio.sleep(0.1)
await y.cancel(asynchronous=True)
c.loop.add_callback(_)
L = []
async def f():
async for future in ac:
L.append(future)
await f()
assert L == [x, y]
def test_async_with(loop):
result = None
client = None
cluster = None
async def f():
async with Client(processes=False, asynchronous=True) as c:
nonlocal result, client, cluster
result = await c.submit(lambda x: x + 1, 10)
client = c
cluster = c.cluster
loop.run_sync(f)
assert result == 11
assert client.status == "closed"
assert cluster.status == "closed"
def test_client_sync_with_async_def(loop):
async def ff():
await asyncio.sleep(0.01)
return 1
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert sync(loop, ff) == 1
assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = np.random.random(1000000)
xr = weakref.ref(x)
d = da.from_array(x, chunks=(100000,))
d = d.persist()
del x
start = time()
while xr() is not None:
if time() > start + 5:
# Help diagnosing
from types import FrameType
x = xr()
if x is not None:
del x
rc = sys.getrefcount(xr())
refs = gc.get_referrers(xr())
print("refs to x:", rc, refs, gc.isenabled())
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("array should have been destroyed")
await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_scheduler_async_def(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_scheduler_async_def_wait(c, s, a, b):
async def f(dask_scheduler):
await asyncio.sleep(0.01)
dask_scheduler.foo = "bar"
await c.run_on_scheduler(f, wait=False)
while not hasattr(s, "foo"):
await asyncio.sleep(0.01)
assert s.foo == "bar"
async def f(dask_worker):
await asyncio.sleep(0.01)
dask_worker.foo = "bar"
await c.run(f, wait=False)
while not hasattr(a, "foo") or not hasattr(b, "foo"):
await asyncio.sleep(0.01)
assert a.foo == "bar"
assert b.foo == "bar"
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
da = pytest.importorskip("dask.array")
async def f():
"""
We wrap this in a function so that the assertions aren't in the
performance report itself
Also, we want this comment to appear
"""
x = da.random.random((1000, 1000), chunks=(100, 100))
with tmpfile(extension="html") as fn:
async with performance_report(filename=fn):
await c.compute((x + x.T).sum())
with open(fn) as f:
data = f.read()
return data
data = await f()
assert "Also, we want this comment to appear" in data
assert "bokeh" in data
assert "random" in data
assert "Dask Performance Report" in data
assert "x = da.random" in data
assert "Threads: 4" in data
@pytest.mark.asyncio
async def test_client_gather_semaphor_loop(cleanup):
async with Scheduler(port=0) as s:
async with Client(s.address, asynchronous=True) as c:
assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
seq = c.map(inc, range(5))
ac = as_completed(seq)
assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
with Client(s["address"]) as c:
assert c.rpc.semaphore._loop is c.loop.asyncio_loop
| bsd-3-clause |
tensorflow/models | research/object_detection/utils/visualization_utils.py | 1 | 64213 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions often receive an image and perform some visualization on it.
The functions do not return a value; instead, they modify the image in place.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
# Set headless-friendly backend.
import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import shape_utils
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function computes a prime number, p, in the range [2, 17] that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime numbers in that range satisfy the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
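# A hedged, illustrative walk-through of the selection above (the count of 126
# is hypothetical and used only to make the arithmetic concrete): with 126
# colors, the candidates [5, 7, 11, 13, 17] lose 7 because 126 % 7 == 0, and of
# the remaining primes 13 is closest to 126 / 10 = 12.6, so the multiplier
# would be 13 and colors would be picked as STANDARD_COLORS[(13 * i) % 126].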
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
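# A minimal usage sketch for the helper above (all values and the output path
# are made up for illustration; this is not part of the library API surface):
#
#   frame = np.zeros((480, 640, 3), dtype=np.uint8)
#   draw_bounding_box_on_image_array(
#       frame, ymin=0.1, xmin=0.2, ymax=0.5, xmax=0.6,
#       color='LimeGreen', thickness=2, display_str_list=['dog: 87%'])
#   save_image_array_as_png(frame, '/tmp/box_demo.png')  # hypothetical path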
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box
(each to be shown on its own line).
use_normalized_coordinates: If True (default), treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=color)
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
The coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings.
a list of strings for each bounding box.
The reason to pass a list of strings for a
bounding box is that it might contain
multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
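# A minimal sketch of the batched helpers above (shapes and labels are
# hypothetical): `boxes` is an [N, 4] array of normalized
# (ymin, xmin, ymax, xmax) rows, and `display_str_list_list` holds one list of
# label strings per box.
#
#   frame = np.zeros((300, 300, 3), dtype=np.uint8)
#   boxes = np.array([[0.1, 0.1, 0.4, 0.4],
#                     [0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
#   draw_bounding_boxes_on_image_array(
#       frame, boxes, color='Red', thickness=2,
#       display_str_list_list=[['cat'], ['dog']])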
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_keypoint_scores=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4]: masks (optional)
[4-5]: keypoints (optional)
[4-6]: keypoint_scores (optional)
[4-7]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_keypoint_scores: Whether keypoint scores should be expected as a
positional argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be:
image - uint8 numpy array with shape (img_height, img_width, 3).
boxes - a numpy array of shape [N, 4].
classes - a numpy array of shape [N].
scores - a numpy array of shape [N] or None.
-- Optional positional arguments --
instance_masks - a numpy array of shape [N, image_height, image_width].
keypoints - a numpy array of shape [N, num_keypoints, 2].
keypoint_scores - a numpy array of shape [N, num_keypoints].
track_ids - a numpy array of shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = keypoint_scores = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoint_scores:
keypoint_scores = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def draw_heatmaps_on_image(image, heatmaps):
"""Draws heatmaps on an image.
The heatmaps are handled channel by channel and different colors are used to
paint different heatmap channels.
Args:
image: a PIL.Image object.
heatmaps: a numpy array with shape [image_height, image_width, channel].
Note that the image_height and image_width should match the size of input
image.
"""
draw = ImageDraw.Draw(image)
channel = heatmaps.shape[2]
for c in range(channel):
heatmap = heatmaps[:, :, c] * 255
heatmap = heatmap.astype('uint8')
    bitmap = Image.fromarray(heatmap, 'L')
    # PIL's convert() returns a new image rather than converting in place, so
    # assign the result to actually obtain the 1-bit bitmap.
    bitmap = bitmap.convert('1')
draw.bitmap(
xy=[(0, 0)],
bitmap=bitmap,
fill=STANDARD_COLORS[c])
def draw_heatmaps_on_image_array(image, heatmaps):
"""Overlays heatmaps to an image (numpy array).
  The function overlays the heatmaps on top of the image. The heatmap values
  are painted with different colors depending on the channel. Similar to the
  "draw_heatmaps_on_image" function, except that the inputs are numpy arrays.
Args:
image: a numpy array with shape [height, width, 3].
heatmaps: a numpy array with shape [height, width, channel].
Returns:
An uint8 numpy array representing the input image painted with heatmap
colors.
"""
if not isinstance(image, np.ndarray):
image = image.numpy()
if not isinstance(heatmaps, np.ndarray):
heatmaps = heatmaps.numpy()
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_heatmaps_on_image(image_pil, heatmaps)
return np.array(image_pil)
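# Illustrative usage sketch: a minimal, hypothetical example of overlaying a
# synthetic two-channel heatmap on a blank image with
# draw_heatmaps_on_image_array. The shapes and values below are assumptions
# chosen only to show the expected dtypes; the module-level numpy import is
# reused.
def _example_draw_heatmaps_on_image_array():
  image = np.zeros((64, 64, 3), dtype=np.uint8)        # blank RGB canvas
  heatmaps = np.zeros((64, 64, 2), dtype=np.float32)   # two heatmap channels
  heatmaps[8:24, 8:24, 0] = 1.0                        # hot region, channel 0
  heatmaps[40:56, 40:56, 1] = 1.0                      # hot region, channel 1
  return draw_heatmaps_on_image_array(image, heatmaps)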
def draw_heatmaps_on_image_tensors(images,
heatmaps,
apply_sigmoid=False):
"""Draws heatmaps on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
    heatmaps: [N, h, w, channel] float32 tensor of heatmaps. Note that the
      heatmaps will be resized to match the input image size before being
      overlaid on the input images. Ideally the heatmap height and width
      should have the same aspect ratio as the input image to avoid potential
      misalignment introduced by the image resize.
apply_sigmoid: Whether to apply a sigmoid layer on top of the heatmaps. If
the heatmaps come directly from the prediction logits, then we should
apply the sigmoid layer to make sure the values are in between [0.0, 1.0].
Returns:
4D image tensor of type uint8, with heatmaps overlaid on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
_, height, width, _ = shape_utils.combined_static_and_dynamic_shape(images)
if apply_sigmoid:
heatmaps = tf.math.sigmoid(heatmaps)
resized_heatmaps = tf.image.resize(heatmaps, size=[height, width])
elems = [images, resized_heatmaps]
def draw_heatmaps(image_and_heatmaps):
"""Draws heatmaps on image."""
image_with_heatmaps = tf.py_function(
draw_heatmaps_on_image_array,
image_and_heatmaps,
tf.uint8)
return image_with_heatmaps
images = tf.map_fn(draw_heatmaps, elems, dtype=tf.uint8, back_prop=False)
return images
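# Illustrative usage sketch: a minimal, hypothetical example of overlaying
# batched heatmap tensors on a batch of images with
# draw_heatmaps_on_image_tensors. The batch size and spatial shapes are
# assumptions; the module-level TensorFlow import is reused.
def _example_draw_heatmaps_on_image_tensors():
  images = tf.zeros([2, 64, 64, 3], dtype=tf.uint8)   # two blank images
  heatmaps = tf.random.uniform([2, 32, 32, 1])        # low-resolution heatmaps
  return draw_heatmaps_on_image_tensors(images, heatmaps, apply_sigmoid=False)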
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
    category_index: a dict that maps integer ids to category dicts. e.g.
      {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_scores: A 3D float32 tensor of shape [N, max_detection,
num_keypoints] with keypoint scores.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    track_ids: [N, max_detections] int32 tensor of unique track ids (i.e.
      instance ids for each object). If provided, the color-coding of boxes
      is dictated by these ids, and not by classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
    use_normalized_coordinates: Whether to assume boxes and keypoints are in
      normalized coordinates (as opposed to absolute coordinates).
Default is True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_keypoint_scores=keypoint_scores is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if keypoint_scores is not None:
elems.append(keypoint_scores)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
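# Illustrative usage sketch: a minimal, hypothetical example of drawing one
# detection on a single image tensor with draw_bounding_boxes_on_image_tensors.
# The toy category index, box, class and score values are assumptions made only
# for demonstration.
def _example_draw_bounding_boxes_on_image_tensors():
  category_index = {1: {'id': 1, 'name': 'object'}}
  images = tf.zeros([1, 64, 64, 3], dtype=tf.uint8)       # one blank image
  boxes = tf.constant([[[0.1, 0.1, 0.9, 0.9]]])           # [N, max_detections, 4]
  classes = tf.constant([[1]], dtype=tf.int32)            # 1-indexed classes
  scores = tf.constant([[0.8]], dtype=tf.float32)
  return draw_bounding_boxes_on_image_tensors(
      images, boxes, classes, scores, category_index, min_score_thresh=0.5)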
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
    A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if (key != input_data_fields.original_image and
key != input_data_fields.image_additional_channels):
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
num_gt_boxes = [-1] * eval_dict[input_data_fields.original_image].shape[0]
if input_data_fields.num_groundtruth_boxes in eval_dict:
num_gt_boxes = tf.cast(eval_dict[input_data_fields.num_groundtruth_boxes],
tf.int32)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
keypoint_scores = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
if detection_fields.detection_keypoint_scores in eval_dict:
keypoint_scores = tf.expand_dims(
eval_dict[detection_fields.detection_keypoint_scores][indx], axis=0)
else:
keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[detection_fields.detection_keypoints][indx]),
dtype=tf.float32), axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
groundtruth_keypoints = None
groundtruth_keypoint_scores = None
gt_kpt_vis_fld = input_data_fields.groundtruth_keypoint_visibilities
if input_data_fields.groundtruth_keypoints in eval_dict:
groundtruth_keypoints = tf.expand_dims(
eval_dict[input_data_fields.groundtruth_keypoints][indx], axis=0)
if gt_kpt_vis_fld in eval_dict:
groundtruth_keypoint_scores = tf.expand_dims(
tf.cast(eval_dict[gt_kpt_vis_fld][indx], dtype=tf.float32), axis=0)
else:
groundtruth_keypoint_scores = tf.expand_dims(tf.cast(
keypoint_ops.set_keypoint_visibilities(
eval_dict[input_data_fields.groundtruth_keypoints][indx]),
dtype=tf.float32), axis=0)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_scores=keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
num_gt_boxes_i = num_gt_boxes[indx]
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=groundtruth_keypoints,
keypoint_scores=groundtruth_keypoint_scores,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat([images_with_detections,
images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx]
[:num_gt_boxes_i],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx]
                    [:num_gt_boxes_i],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_densepose_visualizations(eval_dict,
max_boxes_to_draw=20,
min_score_thresh=0.2,
num_parts=24,
dp_coord_to_visualize=0):
"""Draws DensePose visualizations.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example().
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
num_parts: The number of different densepose parts.
    dp_coord_to_visualize: Whether to visualize v-coordinates (0) or
      u-coordinates (1) overlaid on the person masks.
Returns:
A list of [1, H, W, C] uint8 tensor, each element corresponding to an image
in the batch.
Raises:
ValueError: If `dp_coord_to_visualize` is not 0 or 1.
"""
if dp_coord_to_visualize not in (0, 1):
    raise ValueError('`dp_coord_to_visualize` must be either 0 (for v '
                     'coordinates) or 1 (for u coordinates), but instead got '
                     '{}'.format(dp_coord_to_visualize))
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
if detection_fields.detection_masks not in eval_dict:
raise ValueError('Expected `detection_masks` in `eval_dict`.')
if detection_fields.detection_surface_coords not in eval_dict:
raise ValueError('Expected `detection_surface_coords` in `eval_dict`.')
images_with_detections_list = []
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
# Note that detection masks have already been resized to the original image
# shapes, but `original_image` has not.
# TODO(ronnyvotel): Consider resizing `original_image` in
# eval_util.result_dict_for_batched_example().
true_shape = eval_dict[input_data_fields.true_image_shape][indx]
original_shape = eval_dict[
input_data_fields.original_image_spatial_shape][indx]
image = eval_dict[input_data_fields.original_image][indx]
image = shape_utils.pad_or_clip_nd(image, [true_shape[0], true_shape[1], 3])
image = _resize_original_image(image, original_shape)
scores = eval_dict[detection_fields.detection_scores][indx]
detection_masks = eval_dict[detection_fields.detection_masks][indx]
surface_coords = eval_dict[detection_fields.detection_surface_coords][indx]
def draw_densepose_py_func(image, detection_masks, surface_coords, scores):
"""Overlays part masks and surface coords on original images."""
surface_coord_image = np.copy(image)
for i, (score, surface_coord, mask) in enumerate(
zip(scores, surface_coords, detection_masks)):
if i == max_boxes_to_draw:
break
if score > min_score_thresh:
draw_part_mask_on_image_array(image, mask, num_parts=num_parts)
draw_float_channel_on_image_array(
surface_coord_image, surface_coord[:, :, dp_coord_to_visualize],
mask)
return np.concatenate([image, surface_coord_image], axis=1)
image_with_densepose = tf.py_func(
draw_densepose_py_func,
[image, detection_masks, surface_coords, scores],
tf.uint8)
images_with_detections_list.append(
image_with_densepose[tf.newaxis, :, :, :])
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
    keypoint_scores: a numpy array with shape [num_keypoints]. If provided,
      only keypoints with a score above min_score_thresh will be visualized.
min_score_thresh: A scalar indicating the minimum keypoint score required
for a keypoint to be visualized. Note that keypoint_scores must be
provided for this threshold to take effect.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil,
keypoints,
keypoint_scores=keypoint_scores,
min_score_thresh=min_score_thresh,
color=color,
radius=radius,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=keypoint_edge_color,
keypoint_edge_width=keypoint_edge_width)
np.copyto(image, np.array(image_pil))
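# Illustrative usage sketch: a minimal, hypothetical example of drawing two
# connected keypoints on a blank numpy image with draw_keypoints_on_image_array.
# Keypoints are given in normalized [y, x] order, matching the convention above;
# the coordinates and radius are assumptions chosen only for demonstration.
def _example_draw_keypoints_on_image_array():
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  keypoints = np.array([[0.25, 0.25], [0.75, 0.75]])  # normalized [y, x] pairs
  draw_keypoints_on_image_array(
      image, keypoints, keypoint_edges=[(0, 1)], radius=3)
  return image  # modified in place by the call above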
def draw_keypoints_on_image(image,
keypoints,
keypoint_scores=None,
min_score_thresh=0.5,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
keypoint_scores: a numpy array with shape [num_keypoints].
min_score_thresh: a score threshold for visualizing keypoints. Only used if
keypoint_scores is provided.
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
    keypoint_edge_color: color to draw the keypoint edges with. Default is
      green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints = np.array(keypoints)
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
if keypoint_scores is not None:
keypoint_scores = np.array(keypoint_scores)
valid_kpt = np.greater(keypoint_scores, min_score_thresh)
else:
valid_kpt = np.where(np.any(np.isnan(keypoints), axis=1),
np.zeros_like(keypoints[:, 0]),
np.ones_like(keypoints[:, 0]))
valid_kpt = [v for v in valid_kpt]
for keypoint_x, keypoint_y, valid in zip(keypoints_x, keypoints_y, valid_kpt):
if valid:
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color, fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
if not (valid_kpt[keypoint_start] and valid_kpt[keypoint_end]):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
color: color to draw the keypoints with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0*alpha*(mask > 0))).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
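# Illustrative usage sketch: a minimal, hypothetical example of blending a
# binary mask onto a blank image with draw_mask_on_image_array. The shapes,
# color and alpha values are assumptions chosen only for demonstration.
def _example_draw_mask_on_image_array():
  image = np.zeros((80, 80, 3), dtype=np.uint8)
  mask = np.zeros((80, 80), dtype=np.uint8)
  mask[20:60, 20:60] = 1                              # binary mask region
  draw_mask_on_image_array(image, mask, color='blue', alpha=0.5)
  return image  # modified in place by the call above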
def draw_part_mask_on_image_array(image, mask, alpha=0.4, num_parts=24):
"""Draws part mask on an image.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.4)
num_parts: the maximum number of parts that may exist in the image (default
24 for DensePose).
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
pil_image = Image.fromarray(image)
part_colors = np.zeros_like(image)
mask_1_channel = mask[:, :, np.newaxis]
for i, color in enumerate(STANDARD_COLORS[:num_parts]):
rgb = np.array(ImageColor.getrgb(color), dtype=np.uint8)
part_colors += (mask_1_channel == i + 1) * rgb[np.newaxis, np.newaxis, :]
pil_part_colors = Image.fromarray(np.uint8(part_colors)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_part_colors, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
def draw_float_channel_on_image_array(image, channel, mask, alpha=0.9,
cmap='YlGn'):
"""Draws a floating point channel on an image array.
Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    channel: float32 numpy array with shape (img_height, img_width). The
      values should be in the range [0, 1], and will be mapped to colors using
      the provided colormap `cmap` argument.
    mask: a uint8 numpy array of shape (img_height, img_width) with
      1-indexed parts (0 for background).
alpha: transparency value between 0 and 1 (default: 0.9)
cmap: string with the colormap to use.
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if channel.dtype != np.float32:
raise ValueError('`channel` not of type np.float32')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if image.shape[:2] != channel.shape:
raise ValueError('The image has spatial dimensions %s but the channel has '
'dimensions %s' % (image.shape[:2], channel.shape))
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
cm = plt.get_cmap(cmap)
pil_image = Image.fromarray(image)
colored_channel = cm(channel)[:, :, :3]
pil_colored_channel = Image.fromarray(
np.uint8(colored_channel * 255)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * (mask > 0))).convert('L')
pil_image = Image.composite(pil_colored_channel, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
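# Illustrative usage sketch: a minimal, hypothetical example of painting a
# float-valued channel onto an image through a mask with
# draw_float_channel_on_image_array. The gradient channel and all-ones mask are
# assumptions chosen only for demonstration.
def _example_draw_float_channel_on_image_array():
  image = np.zeros((50, 50, 3), dtype=np.uint8)
  channel = np.linspace(0., 1., 50 * 50, dtype=np.float32).reshape(50, 50)
  mask = np.ones((50, 50), dtype=np.uint8)            # paint everywhere
  draw_float_channel_on_image_array(image, channel, mask, alpha=0.9)
  return image  # modified in place by the call above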
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_scores=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
mask_alpha=.4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location
and creates a display string for each detection and overlays these
on the image. Note that this function modifies the image in place, and returns
that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then
this function assumes that the boxes to be plotted are groundtruth
boxes and plot all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a uint8 numpy array of shape [N, image_height, image_width],
can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can
be None.
keypoint_scores: a numpy array of shape [N, num_keypoints], can be None.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as
normalized coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
all boxes.
min_score_thresh: minimum score threshold for a box or keypoint to be
visualized.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
mask_alpha: transparency value between 0 and 1 (default: 0.4).
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_keypoint_scores_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if keypoint_scores is not None:
box_to_keypoint_scores_map[box].extend(keypoint_scores[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(round(100*scores[i]))
else:
display_str = '{}: {}%'.format(display_str, round(100*scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
          prime_multiplier = _get_multiplier_for_color_randomness()
          box_to_color_map[box] = STANDARD_COLORS[
              (prime_multiplier * track_ids[i]) % len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[
classes[i] % len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image,
box_to_instance_masks_map[box],
color=color,
alpha=mask_alpha
)
if instance_boundaries is not None:
draw_mask_on_image_array(
image,
box_to_instance_boundaries_map[box],
color='red',
alpha=1.0
)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
keypoint_scores_for_box = None
if box_to_keypoint_scores_map:
keypoint_scores_for_box = box_to_keypoint_scores_map[box]
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
keypoint_scores_for_box,
min_score_thresh=min_score_thresh,
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
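# Illustrative usage sketch: a minimal, hypothetical example of the numpy entry
# point above, overlaying a single labeled detection on a blank image. The toy
# category index and detection values are assumptions made only for
# demonstration.
def _example_visualize_boxes_and_labels_on_image_array():
  image = np.zeros((120, 120, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.9, 0.9]])            # [ymin, xmin, ymax, xmax]
  classes = np.array([1])                             # 1-indexed class ids
  scores = np.array([0.9])
  category_index = {1: {'id': 1, 'name': 'object'}}
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores, category_index,
      use_normalized_coordinates=True, min_score_thresh=0.5)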
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
/ cumulative_values.size)
fig = plt.figure(frameon=False)
    ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
        1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
    ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
    image = np.frombuffer(
        fig.canvas.tostring_rgb(), dtype='uint8').reshape(
            1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
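# Illustrative usage sketch: a minimal, hypothetical example of wiring the two
# summary helpers above into a TF1-style graph. The tensor values, bin edges
# and summary names are assumptions made only for demonstration.
def _example_value_summaries():
  values = tf.constant([0.1, 0.5, 0.2, 0.9, 0.3], dtype=tf.float32)
  add_cdf_image_summary(values, name='loss_cdf')
  add_hist_image_summary(values, bins=np.linspace(0., 1., 11), name='loss_hist')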
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates).
Default is True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
        for a batched example. Note that we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
          tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.InputDataFields.groundtruth_keypoints - (optional)
[batch_size, num_boxes, num_keypoints, 2] float32 tensor with
keypoint coordinates in format [y, x].
fields.InputDataFields.groundtruth_keypoint_visibilities - (optional)
[batch_size, num_boxes, num_keypoints] bool tensor with
keypoint visibilities.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
fields.DetectionResultFields.detection_keypoint_scores - (optional)
[batch_size, max_num_boxes, num_keypoints] float32 tensor with
          keypoint scores.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4),
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]])
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(
get_images, [], [tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
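# Illustrative usage sketch: a minimal, hypothetical example of constructing the
# single-frame visualizer and obtaining eval_metric_ops from an eval_dict that
# is assumed to come from eval_util.result_dict_for_batched_example(). The toy
# category index and thresholds are assumptions made only for demonstration.
def _example_single_frame_visualizer(eval_dict):
  category_index = {1: {'id': 1, 'name': 'object'}}
  visualizer = VisualizeSingleFrameDetections(
      category_index, max_examples_to_draw=2, min_score_thresh=0.3)
  return visualizer.get_estimator_eval_metric_ops(eval_dict)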
| apache-2.0 |
sarahgrogan/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner circle
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
smharper/openmc | tests/regression_tests/tally_slice_merge/test.py | 8 | 6593 | import hashlib
import itertools
import openmc
from tests.testing_harness import PyAPITestHarness
class TallySliceMergeTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Define nuclides and scores to add to both tallies
self.nuclides = ['U235', 'U238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.EnergyFilter([0., 0.625])
high_energy = openmc.EnergyFilter([0.625, 20.e6])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.CellFilter(21)
cell_27 = openmc.CellFilter(27)
distribcell_filter = openmc.DistribcellFilter(21)
mesh = openmc.RegularMesh(name='mesh')
mesh.dimension = [2, 2]
mesh.lower_left = [-50., -50.]
mesh.upper_right = [+50., +50.]
mesh_filter = openmc.MeshFilter(mesh)
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
for energy_filter in self.energy_filters:
for cell_filter in self.cell_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.scores.append(score)
tally.nuclides.append(nuclide)
tally.filters.append(cell_filter)
tally.filters.append(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
while len(tallies) != 1:
halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.filters = [distribcell_filter, merged_energies]
for score in self.scores:
distribcell_tally.scores.append(score)
for nuclide in self.nuclides:
distribcell_tally.nuclides.append(nuclide)
mesh_tally = openmc.Tally(name='mesh tally')
mesh_tally.estimator = 'tracklength'
mesh_tally.filters = [mesh_filter, merged_energies]
mesh_tally.scores = self.scores
mesh_tally.nuclides = self.nuclides
# Add tallies to a Tallies object
self._model.tallies = [tallies[0], distribcell_tally, mesh_tally]
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].bins[0],)]),
energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[0, 100, 2000, 30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[500, 5000, 50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the mesh tally
mesh_tally = sp.get_tally(name='mesh tally')
# Sum up a few subdomains from the mesh tally
sum1 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(1, 1), (1, 2)])
# Sum up a few subdomains from the mesh tally
sum2 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(2, 1), (2, 2)])
# Merge the mesh tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def test_tally_slice_merge():
harness = TallySliceMergeTestHarness('statepoint.10.h5')
harness.main()
| mit |
andipeng/MagnePlane | paper/images/trade_scripts/underwater_structural_trades_plot.py | 4 | 1155 | import numpy as np
import matplotlib.pyplot as plt
depth = np.loadtxt('../data_files/underwater_structural_trades/depth.txt', delimiter = '\t')
A_tube = np.loadtxt('../data_files/underwater_structural_trades/A_tube.txt', delimiter = '\t')
t = np.loadtxt('../data_files/underwater_structural_trades/t.txt', delimiter = '\t')
cost = np.loadtxt('../data_files/underwater_structural_trades/cost.txt', delimiter = '\t')
fig = plt.figure(figsize = (3.25,3.5), tight_layout = True)
ax = plt.axes()
plt.setp(ax.get_xticklabels(), fontsize=8)
plt.setp(ax.get_yticklabels(), fontsize=8)
line1, = plt.plot(A_tube, t[0,:], 'b-', linewidth = 2.0, label = 'depth = 20 m')
line2, = plt.plot(A_tube, t[1,:], 'r-', linewidth = 2.0, label = 'depth = 40 m')
line3, = plt.plot(A_tube, t[2,:], 'g-', linewidth = 2.0, label = 'depth = 60 m')
plt.xlabel('Tube Area ($m^2$)', fontsize = 10, fontweight = 'bold')
plt.ylabel('Tube Thickness (m)', fontsize = 10, fontweight = 'bold')
plt.grid('on')
plt.legend(handles = [line1, line2, line3], loc = 2, fontsize = 8)
plt.savefig('../graphs/underwater_structural_trades/tube_area_vs_depth.png', format = 'png', dpi = 300)
plt.show() | apache-2.0 |
terkkila/scikit-learn | sklearn/utils/estimator_checks.py | 11 | 46945 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test that classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
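# Illustrative usage sketch: a minimal, hypothetical example of running the
# full check suite against a scikit-learn estimator class. Any failing check
# raises an AssertionError describing the violated convention.
def _example_check_estimator():
    from sklearn.linear_model import LogisticRegression
    check_estimator(LogisticRegression)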
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
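# struct.calcsize('P') is the pointer size in bytes (4 or 8), so the product is
# 32 on a 32bit interpreter and 64 on a 64bit one.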
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle feature values less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one scheme of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely
        # too small to reach convergence.
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
        # is a possible RandomState instance, but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
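# Illustrative example: for name='MultiTaskLasso' a 1-D y of shape (n_samples,)
# comes back with shape (n_samples, 1); for all other names y is returned
# unchanged.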
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
    # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
Windy-Ground/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
NERSC/QENESAP | PP/tools/sum_states.py | 19 | 8217 | #! /usr/bin/python
###### SUM STATES #######
# Python script for summing and ploting the data from the Density Of States
# files obtained from projwfc.x. It can also sum k-solved dos and make a plot
# with matplotlib (if not available, gnuplot; if neither is available, print to file).
# If there is no X11 forwarding, it plots in the terminal.
# It does something very similar to sumpdos.f90, but with
# some extra features (use "-h" option).
#
# it takes two different inputs, the first one is the pw.x output
# ("-o" option), which is used for parsing the Fermi energy for fitting
# the PDOS curve to the right energy. The other files are the pDOS files
# ("-s" option), that can be given with shell syntax, i.e.
# pdos_atm*Fe*wfc*d* for summing all the d orbitals of Fe.
# It can also handle k solved dos files.
#
# One of the most useful features, compared to sumpdos.x, is the
# fact that it also builds the picture directly, so it can be directly
# visualized and exported for inclusion in a document.
# It uses matplotlib for plotting, but if no matplotlib is found in
# the $PYTHONPATH, it tries to use gnuplot; if no gnuplot is available, it
# dumps the output data to a file.
# In the case that no X11 forwarding is available (i.e. ssh to the cluster),
# it shows a rough graph in the terminal, so we get an idea of the shape
# of the results.
#
# Example of usage:
# cd ....../espresso-5.0/PP/examples/example02/results/
# ../../../src/sum_states.py -o ni.dos.out -s
# ni.pdos_atm#1\(Ni\)_wfc#2\(d\) -t "Example PP/02" -xr -6 2
#
#
# The procedure for obtaining the DOS files is explained
# i.e. in (espresso-dir)/PP/examples/example02/
#
# Author: Dr. Julen Larrucea
# University of Bremen,
# Bremen Centre for Computational Materials Science, HMI Group
# julenl [at] gmail.com or larrucea [at] hmi.uni-bremen.de
#
# This file is distributed under the terms of the GNU General Public
# License. See the file `License'
# in the root directory of the present distribution,
# or http://www.gnu.org/copyleft/gpl.txt .
#######################
import sys
import os
import fnmatch
import linecache
# Some default variables
version=0.2
pwout=""
selat="*"
graphtitle=""
min_x,max_x=-10,3
min_y,max_y="",""
output_file_name="sum_dos.out"
prt="no"
print " #### sum_states.py version "+str(version)+" #### "
# Check if X11, matplotlib and gnuplot are available
try:
os.popen("gnuplot -V").read()
prog_gnuplot="yes" # gnuplot is installed
except:
prog_gnuplot="no"
# Parse command line options
if len(sys.argv)>1:
for i in sys.argv:
if i.startswith('-'):
option=i.split('-')[1]
if option=="o":
pwout= sys.argv[sys.argv.index('-o')+1]
if option=="s":
selat= sys.argv[sys.argv.index('-s')+1]
if option=="p":
prt="yes"
if len(sys.argv) > sys.argv.index('-p')+1: # if there is a name after "-p" take it as an output name
if sys.argv[sys.argv.index('-p')+1] != "-": # otherwise default name sum_dos.out
dos_out_name=sys.argv[sys.argv.index('-p')+1]
if option=="t":
graphtitle= sys.argv[sys.argv.index('-t')+1]
if option=="xr":
min_x,max_x= float(sys.argv[sys.argv.index('-xr')+1]),float(sys.argv[sys.argv.index('-xr')+2])
if option=="yr":
min_y,max_y= float(sys.argv[sys.argv.index('-yr')+1]),float(sys.argv[sys.argv.index('-yr')+2])
if option=="v":
print "sum_dos.py version: "+version
sys.exit()
if option=="h":
print '''
-o QE output file name (for grepping Fermi E)
-s Selection of atoms for summing the DOSes. "*" for all, *1*Fe*d* for first Fe atom " (def. "*")
 -p Print output to a file and additionally provide an output name (def. no output and "sum_dos.out")
-t set title in the head of the graph
-xr set min and max x value for the axes in the graph
-yr set min and max y value for the axes in the graph
-h print this help
-v print version
 Example: sum_states.py -s sys.pdos_atm#4\(Fe2\)_wfc#2\(d\) -t "Wustite LDA+U single Fe" -xr -9 4
'''
sys.exit()
# Check for matplotlib/gnuplot and import mpl if possible
if len(os.popen('echo $DISPLAY').read()) > 1:
graphic_plot="yes"
try:
from pylab import *
mplplot="yes"
print "pylab imported"
except:
print "There is no mathplotlib installed. Using gnuplot."
mplplot="no"
prt="yes"
else:
print "No X11. Trying to plot on terminal"
graphic_plot="no"
if prog_gnuplot=="no":
prt="yes"
# if not specified, try to find the espresso output, in order to parse the Fermi energy
if pwout == "":
for filen in filter(os.path.isfile, os.listdir('.')):
if "Program PWSCF" in linecache.getline(filen, 2):
print "Using " + filen + " as pw.x output. You can specify another one with the -o option."
pwout=filen
# Parse Fermi energy from the pw.x output
if pwout!="":
try:
os.popen("grep -a 'the Fermi energy is' "+pwout ).read()
fermi=float(os.popen("grep -a 'the Fermi energy is' "+pwout ).read().split()[4])
print "Fermi energy = ", fermi, "a.u."
except:
print "WARNING: No Fermi energy found. Using 0 e.V. instead"
fermi=0
else:
print "WARNING: No pw.x output found. Using E Fermi = 0 e.V."
fermi=0
# List of all DOS files to add
dosfiles=[]
for dfile in os.listdir('.'):
if fnmatch.fnmatch(dfile, selat):
dosfiles.append(dfile)
if len(dosfiles)==0:
print "ERROR: Provide a (list of) valid DOS file(s)"
sys.exit()
print "dosfiles list: ",
for dosfile in dosfiles:
print dosfile,
print ""
# Check whether we have k-solved DOS
if open(dosfiles[0],'r').readline().split()[1]=="E":
ksolved="no"
print "no ksolved"
elif open(dosfiles[0],'r').readline().split()[1]=="ik":
ksolved="yes"
print "ksolved"
# Sum over all k-points and files
mat=[] # matrix with total sum of ldos
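# Each row of mati/mat holds [energy, ldos_up, ldos_down]; mati accumulates one
# DOS file (summed over k-points when k-resolved) and mat keeps the running total.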
for i in range(len(dosfiles)):
mati=[] # temporal matrix for each DOS file "i"
k=0
for line in open(dosfiles[i],'r'):
if len(line) > 10 and line.split()[0] != "#":
if ksolved=="no":
mati.append([float(line.split()[0]),float(line.split()[1]),float(line.split()[2])])
if ksolved=="yes":
ik = int(line.split()[0])
if ik > k: #if it is a different k block
k=int(line.split()[0])
oldmat=[] # temporal matrix for each k-point
if ik == 1:
mati.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])]) # append: energy, ldosup, ldosdw
elif ik == k and k > 1:
oldmat.append([float(line.split()[1]),float(line.split()[2]),float(line.split()[3])])
elif len(line) < 5 and k > 1: #if blank line, sum k-frame to the total
for j in range(len(oldmat)):
mati[j]=[mati[j][0],mati[j][1]+oldmat[j][1],mati[j][2]+oldmat[j][2]]
if mat == []: # if it is the first dos file, copy total matrix (mat) = the first dos files's data
mat=mati[:]
else:
for j in range(len(mati)): # if it is not the first file, sum values
mat[j]=[mat[j][0],mat[j][1]+mati[j][1],mat[j][2]+mati[j][2]]
print "...ploting..."
if prt=="yes":
out=open(output_file_name,"w")
x,y1,y2=[],[],[]
for i in mat:
x.append(i[0]-fermi)
y1.append(i[1])
y2.append(-i[2])
if prt=="yes": # print to a file
print>>out, i[0]-fermi, i[1], i[2]
if prt=="yes":
out.close()
if graphic_plot=="yes":
# if there is matplotlib, generate a plot with it
if mplplot=="yes":
plot(x,y1,linewidth=1.0)
plot(x,y2,linewidth=1.0)
print min(y2),max(y1)
plt.title(graphtitle)
plt.xlabel('E (eV)')
plt.ylabel('States')
plt.grid(True)
plt.rcParams.update({'font.size': 22})
plt.fill(x,y1,color='0.8')
plt.fill(x,y2,color='0.9')
if min_x and max_x:
fromx,tox=min_x,max_x
plt.axis([fromx, tox, min(y2), max(y1)])
show()
elif mplplot=="no" and prog_gnuplot=="yes": # If no mathplotlib available, use gnuplot
os.system("echo \"plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1"+str(fermi)+"):3 w l\" | gnuplot -persist")
elif graphic_plot=="no": # If no X forwarding available, show graph in terminal
if prog_gnuplot=="yes":
os.system("echo \"set terminal dumb; plot '"+ output_file_name + "' using ($1-"+str(fermi)+"):2 w l, '' u ($1-"+str(fermi)+"):3 w l\" | gnuplot -persist")
| gpl-2.0 |
louispotok/pandas | pandas/tests/groupby/aggregate/test_other.py | 3 | 18116 | # -*- coding: utf-8 -*-
"""
test all other .agg behavior
"""
from __future__ import print_function
import pytest
from collections import OrderedDict
import datetime as dt
from functools import partial
import numpy as np
import pandas as pd
from pandas import (
date_range, DataFrame, Index, MultiIndex, PeriodIndex, period_range, Series
)
from pandas.core.groupby.groupby import SpecificationError
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
def test_agg_api():
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, '2012-01-01', 1.0],
[2, '2012-01-02', 2.0],
[3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0],
(dt.datetime.strptime(row[1], '%Y-%m-%d').date()
if row[1] else None),
row[2]]
for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index():
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_dict(OrderedDict(series))
grouped = df.groupby(df.index.month)
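    # iterating over the groups should simply not raise (regression test, GH 3579)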
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
# GH 12821
df = DataFrame({'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.first(), exp)
tm.assert_frame_equal(grouped.agg('first'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'first'}), exp)
tm.assert_series_equal(grouped.time.first(), exp['time'])
tm.assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
tm.assert_frame_equal(grouped.last(), exp)
tm.assert_frame_equal(grouped.agg('last'), exp)
tm.assert_frame_equal(grouped.agg({'time': 'last'}), exp)
tm.assert_series_equal(grouped.time.last(), exp['time'])
tm.assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.agg(len), exp)
tm.assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
# similar to GH12821
# xref #11444
u = [dt.datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean], axis=1)
expected.columns = ['sum', 'mean']
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum, c_mean], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation():
# 15931
df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
'B': range(5),
'C': range(5)})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
assert "using a dict with renaming" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby('A').B.agg({'foo': 'count'})
assert "using a dict on a Series for aggregation" in str(w[0].message)
def test_agg_compat():
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': ['sum', 'std']})
tm.assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(), g['D'].std()], axis=1)
expected.columns = ['C', 'D']
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'C': 'sum', 'D': 'std'})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts():
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
msg = r'cannot perform renaming for r[1-2] with a nested dictionary'
with tm.assert_raises_regex(SpecificationError, msg):
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(),
g['D'].mean(), g['D'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples(
[('ra', 'mean'), ('ra', 'std'),
('rb', 'mean'), ('rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_item_by_item_raise_typeerror():
df = DataFrame(np.random.randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError('test')
with tm.assert_raises_regex(TypeError, 'test'):
df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.values.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
tm.assert_frame_equal(result, expected)
def test_agg_consistency():
# agg with ([]) and () not consistent
# GH 6715
def P1(a):
try:
return np.percentile(a.dropna(), q=1)
except Exception:
return np.nan
df = DataFrame({'col1': [1, 2, 3, 4],
'col2': [10, 25, 26, 31],
'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10),
dt.date(2013, 2, 11), dt.date(2013, 2, 11)]})
g = df.groupby('date')
expected = g.agg([P1])
expected.columns = expected.columns.levels[0]
result = g.agg(P1)
tm.assert_frame_equal(result, expected)
def test_agg_callables():
# GH 7929
df = DataFrame({'foo': [1, 2], 'bar': [3, 4]}).astype(np.int64)
class fn_class(object):
def __call__(self, x):
return sum(x)
equiv_callables = [sum,
np.sum,
lambda x: sum(x),
lambda x: x.sum(),
partial(sum),
fn_class(), ]
expected = df.groupby("foo").agg(sum)
for ecall in equiv_callables:
result = df.groupby('foo').agg(ecall)
tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
# GH 3788
df = pd.DataFrame([[1, np.array([10, 20, 30])],
[1, np.array([40, 50, 60])],
[2, np.array([20, 30, 40])]],
columns=['category', 'arraydata'])
result = df.groupby('category').agg(sum)
expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]
expected_index = pd.Index([1, 2], name='category')
expected_column = ['arraydata']
expected = pd.DataFrame(expected_data,
index=expected_index,
columns=expected_column)
tm.assert_frame_equal(result, expected)
def test_agg_timezone_round_trip():
# GH 15426
ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
df = pd.DataFrame({'a': 1,
'b': [ts + dt.timedelta(minutes=nn)
for nn in range(10)]})
result1 = df.groupby('a')['b'].agg(np.min).iloc[0]
result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
result3 = df.groupby('a')['b'].min().iloc[0]
assert result1 == ts
assert result2 == ts
assert result3 == ts
dates = [pd.Timestamp("2016-01-0%d 12:00:00" % i, tz='US/Pacific')
for i in range(1, 5)]
df = pd.DataFrame({'A': ['a', 'b'] * 2, 'B': dates})
grouped = df.groupby('A')
ts = df['B'].iloc[0]
assert ts == grouped.nth(0)['B'].iloc[0]
assert ts == grouped.head(1)['B'].iloc[0]
assert ts == grouped.first()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[0])[0]
ts = df['B'].iloc[2]
assert ts == grouped.last()['B'].iloc[0]
assert ts == grouped.apply(lambda x: x.iloc[-1])[0]
def test_sum_uint64_overflow():
# see gh-14758
# Convert to uint64 and don't overflow
df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
df = df + 9223372036854775807
index = pd.Index([9223372036854775808,
9223372036854775810,
9223372036854775812],
dtype=np.uint64)
expected = pd.DataFrame({1: [9223372036854775809,
9223372036854775811,
9223372036854775813]},
index=index)
expected.index.name = 0
result = df.groupby(0).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.DataFrame({'C': {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
(list, pd.DataFrame({'C': {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
(lambda x: tuple(x), pd.DataFrame({'C': {(1, 1): (1, 1, 1),
(3, 4): (3, 4, 4)}})),
(lambda x: list(x), pd.DataFrame({'C': {(1, 1): [1, 1, 1],
(3, 4): [3, 4, 4]}}))
])
def test_agg_structs_dataframe(structure, expected):
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby(['A', 'B']).aggregate(structure)
expected.index.names = ['A', 'B']
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("structure, expected", [
(tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name='C')),
(list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name='C')),
(lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)],
index=[1, 3], name='C')),
(lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]],
index=[1, 3], name='C'))
])
def test_agg_structs_series(structure, expected):
# Issue #18079
df = pd.DataFrame({'A': [1, 1, 1, 3, 3, 3],
'B': [1, 1, 1, 4, 4, 4],
'C': [1, 1, 1, 3, 4, 4]})
result = df.groupby('A')['C'].aggregate(structure)
expected.index.name = 'A'
tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.")
def test_agg_category_nansum(observed):
categories = ['a', 'b', 'c']
df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'],
categories=categories),
'B': [1, 2, 3]})
result = df.groupby("A", observed=observed).B.agg(np.nansum)
expected = pd.Series([3, 3, 0],
index=pd.CategoricalIndex(['a', 'b', 'c'],
categories=categories,
name='A'),
name='B')
if observed:
expected = expected[expected != 0]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
anthonyng2/Machine-Learning-For-Finance | Classification Based Machine Learning for Algorithmic Trading/Predict Next Day Return/spyder_LogisticRegression.py | 1 | 1467 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 5 16:51:39 2017
@author: AnthonyN
https://www.quantstart.com/articles/Forecasting-Financial-Time-Series-Part-1
Predicting Price Returns
"""
import numpy as np
import pandas as pd
lags = 5
start_test = pd.to_datetime('2017-06-18')
from sklearn.linear_model import LogisticRegression
ts = pd.read_csv('data\XMA.csv', index_col='Date')
ts.index = pd.to_datetime(ts.index)
tslag = ts[['XMA']].copy()
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["XMA"].shift(i+1)
tslag["returns"] = tslag["XMA"].pct_change()
# Create the lagged percentage returns columns
for i in range(0,lags):
tslag["Lag_" + str(i+1)] = tslag["Lag_" + str(i+1)].pct_change()
tslag.fillna(0, inplace=True)
tslag["Direction"] = np.sign(tslag["returns"])
# Use the prior two days of returns as predictor values, with direction as the response
X = tslag[["Lag_1", "Lag_2"]]
y = tslag["Direction"]
# Create training and test sets
X_train = X[X.index < start_test]
X_test = X[X.index >= start_test]
y_train = y[y.index < start_test]
y_test = y[y.index >= start_test]
# Create prediction DataFrame
pred = pd.DataFrame(index=y_test.index)
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# pred = (1.0 + y_pred * y_test)/2.0
pred = (y_pred == y_test).astype(float)  # 1.0 for a correct direction call, 0.0 otherwise
hit_rate = np.mean(pred)
print('Logistic Regression hit rate: {:.4f}'.format(hit_rate)) | mit |
nelson-liu/scikit-learn | sklearn/utils/estimator_checks.py | 3 | 60988 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
    # Checks that the Estimator raises an error when y contains NaN or infinite values.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
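# Minimal usage sketch: check_estimator is called with an estimator *class*, e.g.
#
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)   # runs all checks; raises if one fails
#
# Any scikit-learn-compatible estimator class can be passed in the same way.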
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
    # SpectralCoclustering raises
    # "ValueError: Found array with 0 feature(s) (shape=(23, 0))
    # while a minimum of 1 is required." so it is skipped here.
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d X with a single-element y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    # Checks that the Estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that partial_fit raises an error if the number of features changes between calls.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # small for convergence, so raise it.
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
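# For example, an __init__ that assigned `self.coef_ = None` would fail this check,
# since trailing-underscore attributes are reserved for quantities estimated in fit().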
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D
# Convert into a 2-D y for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
    # Test that estimators that are not transformers and have a max_iter
    # parameter set the n_iter_ attribute to at least 1 after fitting.
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
# HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
# which doesn't return a n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
    # Test that transformers with a max_iter parameter set the n_iter_
    # attribute to at least 1 after fitting.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
| bsd-3-clause |
caidongyun/BuildingMachineLearningSystemsWithPython | ch06/03_clean.py | 22 | 5972 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to improve the classifier by cleaning the tweets a bit
#
import time
start_time = time.time()
import re
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.pipeline import Pipeline
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "03"
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in list(emo_repl.keys())]))]
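# Longest keys come first so that e.g. ":dd" is replaced before ":d"; otherwise the
# shorter emoticon would match inside the longer one and leave a stray character behind.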
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_ngram_model(params=None):
def preprocessor(tweet):
global emoticons_replaced
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.items():
tweet = re.sub(r, repl, tweet)
return tweet
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
clf = MultinomialNB()
pipeline = Pipeline([('tfidf', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
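# Rough sketch of what the preprocessor does to a tweet (illustrative values only;
# the exact output depends on the emoticon and regex tables defined above):
#
#     pipeline = create_ngram_model()
#     pipeline.named_steps['tfidf'].preprocessor("I don't like it :(")
#     # -> 'i do not like it  bad '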
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
        median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(tfidf__ngram_range=(1, 2),
tfidf__min_df=1,
tfidf__stop_words=None,
tfidf__smooth_idf=False,
tfidf__use_idf=False,
tfidf__sublinear_tf=True,
tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
| mit |
janusnic/21v-python | unit_20/matplotlib/contour_demo.py | 4 | 3496 | #!/usr/bin/env python
"""
Illustrate simple contour plotting, contours on an image with
a colorbar for the contours, and labelled contours.
See also contour_image.py.
"""
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
# difference of Gaussians
Z = 10.0 * (Z2 - Z1)
# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')
# contour labels can be placed manually by providing a list of positions
# (in data coordinates). See ginput_manual_clabel.py for interactive
# placement.
plt.figure()
CS = plt.contour(X, Y, Z)
manual_locations = [(-1, -1.4), (-0.62, -0.7), (-2, 0.5), (1.7, 1.2), (2.0, 1.4), (2.4, 1.7)]
plt.clabel(CS, inline=1, fontsize=10, manual=manual_locations)
plt.title('labels at selected locations')
# You can force all the contours to be the same color.
plt.figure()
CS = plt.contour(X, Y, Z, 6,
colors='k', # negative contours will be dashed by default
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours dashed')
# You can set negative contours to be solid instead of dashed:
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
plt.figure()
CS = plt.contour(X, Y, Z, 6,
colors='k', # negative contours will be dashed by default
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Single color - negative contours solid')
# And you can manually specify the colors of the contour
plt.figure()
CS = plt.contour(X, Y, Z, 6,
linewidths=np.arange(.5, 4, .5),
colors=('r', 'green', 'blue', (1, 1, 0), '#afeeee', '0.5')
)
plt.clabel(CS, fontsize=9, inline=1)
plt.title('Crazy lines')
# Or you can use a colormap to specify the colors; the default
# colormap will be used for the contour lines
plt.figure()
im = plt.imshow(Z, interpolation='bilinear', origin='lower',
cmap=cm.gray, extent=(-3, 3, -2, 2))
levels = np.arange(-1.2, 1.6, 0.2)
CS = plt.contour(Z, levels,
origin='lower',
linewidths=2,
extent=(-3, 3, -2, 2))
# Thicken the zero contour.
zc = CS.collections[6]
plt.setp(zc, linewidth=4)
plt.clabel(CS, levels[1::2], # label every second level
inline=1,
fmt='%1.1f',
fontsize=14)
# make a colorbar for the contour lines
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.title('Lines with colorbar')
#plt.hot() # Now change the colormap for the contour lines and colorbar
plt.flag()
# We can still add a colorbar for the image, too.
CBI = plt.colorbar(im, orientation='horizontal', shrink=0.8)
# This makes the original colorbar look a bit out of place,
# so let's improve its position.
l, b, w, h = plt.gca().get_position().bounds
ll, bb, ww, hh = CB.ax.get_position().bounds
CB.ax.set_position([ll, b + 0.1*h, ww, h*0.8])
plt.show()
| mit |
kamiseko/factor-test | barraRiskModel.py | 1 | 7088 | #!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries to use
from __future__ import division
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
import statsmodels
import cvxopt as cv
from cvxopt import solvers
# Import My own library for factor testing
import factorFilterFunctions as ff
# own method to cal moving weighted Covariance Matrix
def calEWMcovariance(facRetdf, decay=0.94):
'''
To calculate EWM covariance matrix of given facRetdf
output: Dataframe, the ewm cov-matrix of the factors
input:
facRetdf: Dataframe, factor return dataframe
    decay: float, the exponential decay factor.
    Decay factors are typically set at:
    - 0.94 (1-day horizon), estimated from 112 days of data;
    - 0.97 (1-month horizon), estimated from 227 days of data.
'''
m, n = facRetdf.shape
facDF = facRetdf - facRetdf.mean()
for i in xrange(m):
facDF.iloc[i] = np.sqrt(decay**(m-1-i)) * facDF.iloc[i]
ewmCovMatrix = facDF.T.dot(facDF) * (1-decay)/(1-decay**m)
return ewmCovMatrix
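# Illustrative usage (a sketch only, not called anywhere in this module): apply
# calEWMcovariance above to a small synthetic factor-return panel.  All names
# below (_ewm_cov_example, n_days, n_factors, fac_ret) are local to this sketch.
def _ewm_cov_example(n_days=252, n_factors=3, decay=0.94):
    dates = pd.date_range('2016-01-01', periods=n_days, freq='B')
    fac_ret = pd.DataFrame(np.random.randn(n_days, n_factors) * 0.01,
                           index=dates,
                           columns=['fac%d' % i for i in range(n_factors)])
    ewm_cov = calEWMcovariance(fac_ret, decay=decay)
    print ewm_cov
    return ewm_cov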
# get intersection
def getInterStk(dfList):
'''
    To get the intersection of the columns of several dataframes.
output: List, the intersection of the columns of dataframes.
input:
    dfList: List, which contains the dataframes whose columns you want to intersect; len(dfList) should be more than 1.
'''
columnsList = map(lambda x: set(x.columns.tolist()), dfList)
stkList = reduce(lambda x, y: x & y, columnsList)
return stkList
# independentfactor should be a list contains of dataframe
def orthoFactor(factordf, independentfactor, WLS =False, weightdf = None):
'''
    Multi-variable regression used to orthogonalize a factor.
    factordf and the dataframes in independentfactor should have the same index and the same columns.
output: Dataframe, the orthogonalized result of factordf
input:
factordf: Dataframe, factor to be orthogonalized
    independentfactor: List, the factor dataframes used as independent variables in the regression (all \
    with the same columns and index)
WLS: True to use WLS , False to use OLS. If True, then weightdf should not be none.
weightdf: Dataframe , which has no nan and the shape is same as dataframes in factorDict.
'''
emptydf = pd.DataFrame(index=factordf.index, columns=factordf.columns, data=None, dtype=float)
dfNum = len(independentfactor)
if dfNum == 0:
print 'Input is an empty list!'
raise ValueError
for date in factordf.index:
factordfSlice = factordf.loc[date]
mapfunction = map(lambda x: x.loc[date], independentfactor)
if dfNum > 1:
totaldf = pd.concat(mapfunction, axis=1)
else:
totaldf = independentfactor[0].loc[date]
if WLS:
w = weightdf.loc[date]
result = sm.WLS(factordfSlice.T, totaldf, weights=1/w).fit()
else:
result = sm.OLS(factordfSlice.T, totaldf).fit()
        emptydf.loc[date] = result.resid
return emptydf
# construct the multiple factor structural risk model
def multiFactorReg(returndf,factorDict,WLS =False, weightdf = None):
'''
    Multi-variable regression of returns on factor exposures.
returndf and dataframes in factorDict should have same index and same columns.
output: 4 Dataframe, respectively idiosyncratic return for each stock, factor Return, factor P-value and
R-Square of the linear regression model.
input:
    returndf: Dataframe, can either be the return or the active return.
    factorDict: Dictionary, the keys are the names of the factors and the values are the corresponding factor dataframes (all\
    with the same columns and index).
WLS: True to use WLS , False to use OLS. If True, then weightdf should not be none.
weightdf: Dataframe , which has no nan and the shape is same as dataframes in factorDict.
'''
specificReturn = pd.DataFrame(index=returndf.index, columns=returndf.columns, data=None, dtype=float)
factorReturn = pd.DataFrame(index=returndf.index, columns=factorDict.keys(), data=None, dtype=float)
factorPvalue = pd.DataFrame(index=returndf.index, columns=factorDict.keys(), data=None, dtype=float)
RSquare = pd.DataFrame(index=returndf.index, columns=['R-Square'], data=None, dtype=float)
dfNum = len(factorDict.keys())
if dfNum == 0:
print 'Input is an empty list!'
raise ValueError
for date in returndf.index:
returndfSlice = returndf.loc[date]
mapfunction = map(lambda x: x.loc[date], factorDict.values())
if dfNum > 1:
totaldf = pd.concat(mapfunction, axis=1)
else:
totaldf = factorDict.values()[0].loc[date]
if WLS:
w = weightdf.loc[date]
result = sm.WLS(returndfSlice.T, totaldf, weights=1/w).fit()
else:
result = sm.OLS(returndfSlice.T, totaldf).fit()
        specificReturn.loc[date] = result.resid
        factorReturn.loc[date] = result.params.values
        factorPvalue.loc[date] = result.pvalues.values
        RSquare.loc[date] = result.rsquared
return specificReturn, factorReturn, factorPvalue, RSquare
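# Illustrative call pattern (a sketch only -- the factor names and dataframes
# below are placeholders; the real inputs are built elsewhere in this project):
#     factorDict = {'value': valueExposure, 'momentum': momExposure}
#     spRet, facRet, facPval, r2 = multiFactorReg(stockRet, factorDict,
#                                                 WLS=True, weightdf=capWeight)
# Each output row then corresponds to one cross-sectional regression date.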
# calculate adjusted factor covariance matrix
def calFinlCov(ewmMatrix, benchMarkWeight, benchMarkRet, riskExposureDF, finalSpMat, compara=1 / 0.94 - 1):
'''
    To calculate the final covariance used for optimization, with the factor covariance matrix
    adjusted by the Barra method (p. 32, chapter 2, Barra Risk Model Handbook).
    Output: Dataframe, the adjusted factor covariance matrix (k*k, where k is the number of factors).
    Input:
    ewmMatrix: Dataframe, the factor covariance matrix calculated directly from the output of multiFactorReg.
    benchMarkWeight: Dataframe or Series, benchmark weights of the stocks on a given date.
    benchMarkRet: Dataframe or Series, benchmark returns (note the index!).
    riskExposureDF: Dataframe, risk exposures of the stocks to the factors on a specific date.
    finalSpMat: Dataframe, the diagonal covariance matrix of the specific returns.
'''
# calculate monthly scaled variance forecast for the market index by DEWIV
alphaS = 21 * benchMarkRet.ewm(ignore_na=True, min_periods=0, com=compara).cov(pairwise=True)[-200:].iloc[-1]
# calculate monthly specific risk of the market index
alphaSP = reduce(lambda x, y: x.dot(y), [benchMarkWeight, finalSpMat, benchMarkWeight.T])
# calculate total variance of the market index
alphaM = (
reduce(lambda x, y: x.dot(y), [benchMarkWeight, riskExposureDF, ewmMatrix, riskExposureDF.T, benchMarkWeight.T])
+ alphaSP)
# can not use np.dot on two series to construct a matrix
benchMarkWeightDF = pd.DataFrame(benchMarkWeight)
lastPart = reduce(lambda x, y: x.dot(y), [ewmMatrix, riskExposureDF.T, benchMarkWeightDF,
benchMarkWeightDF.T, riskExposureDF, ewmMatrix])
finalCovMatrix = ewmMatrix + ((alphaS - alphaM) / (alphaS - alphaSP)) * lastPart
return finalCovMatrix | mit |
sinhrks/scikit-learn | examples/linear_model/plot_ridge_coeffs.py | 157 | 2785 | """
==============================================================
Plot Ridge coefficients as a function of the L2 regularization
==============================================================
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color in the left plot represents one different dimension of the
coefficient vector, and this is displayed as a function of the
regularization parameter. The right plot shows how exact the solution
is. This example illustrates how a well-defined solution is
found by Ridge regression and how regularization affects the
coefficients and their values. The plot on the right shows how
the difference of the coefficients from the estimator changes
as a function of regularization.
In this example the dependent variable Y is set as a function
of the input features: y = X*w + c. The coefficient vector w is
randomly sampled from a normal distribution, whereas the bias term c is
set to a constant.
As alpha tends toward zero the coefficients found by Ridge
regression stabilize towards the randomly sampled vector w.
For big alpha (strong regularisation) the coefficients
are smaller (eventually converging at 0) leading to a
simpler and biased solution.
These dependencies can be observed on the left plot.
The right plot shows the mean squared error between the
coefficients found by the model and the chosen vector w.
Less regularised models retrieve the exact
coefficients (error is equal to 0), stronger regularised
models increase the error.
Please note that in this example the data is non-noisy, hence
it is possible to extract the exact coefficients.
"""
# Author: Kornel Kielczewski -- <[email protected]>
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
clf = Ridge()
X, y, w = make_regression(n_samples=10, n_features=10, coef=True,
random_state=1, bias=3.5)
coefs = []
errors = []
alphas = np.logspace(-6, 6, 200)
# Train the model with different regularisation strengths
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
errors.append(mean_squared_error(clf.coef_, w))
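# As a sanity check (illustrative, not part of the original example): for a
# single alpha, ridge with an intercept is equivalent to the closed-form
# solution w = (Xc^T Xc + alpha*I)^(-1) Xc^T yc on centred data.  The names
# below (a_check, Xc, yc, w_closed) are introduced only for this check.
a_check = 1e-3
Xc = X - X.mean(axis=0)
yc = y - y.mean()
w_closed = np.linalg.solve(Xc.T.dot(Xc) + a_check * np.eye(X.shape[1]),
                           Xc.T.dot(yc))
clf.set_params(alpha=a_check)
clf.fit(X, y)
print("Max difference between closed-form and Ridge coefficients: %.2e"
      % np.abs(w_closed - clf.coef_).max())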
# Display results
plt.figure(figsize=(20, 6))
plt.subplot(121)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.subplot(122)
ax = plt.gca()
ax.plot(alphas, errors)
ax.set_xscale('log')
plt.xlabel('alpha')
plt.ylabel('error')
plt.title('Coefficient error as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
marcocaccin/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured as a function of the number of
candidates. Here, the term "number of candidates" refers to the maximum number
of distinct points retrieved from each tree when calculating the distances.
Nearest neighbors are selected from this pool of candidates. The number of
estimators is maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. The number of
trees is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
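# Accuracy below is simply the fraction of the n_queries points whose single
# approximate nearest neighbor (from LSHForest) coincides with the exact
# nearest neighbor computed above.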
# Set `n_candidates` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iterations to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
pyrolysis/low-order-particle | oak-200-20000.py | 1 | 10573 | """
Compare volume average temperature profiles, Tv, from 1-D model and 3-D Comsol
simulation of white oak particles with Feret diameters of DF = 200 um to 20 mm.
Surface area to volume diameter, Dsv, is used for the 1-D model.
Requirements: Python 3, NumPy, SciPy, Matplotlib, funcHeatCond, funcOther
"""
import numpy as np
import matplotlib.pyplot as py
from funcHeatCond import hc2
from funcOther import vol, Tvol, dsv
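# Note: dsv(sa, v) from funcOther is taken here to be the diameter of the
# sphere with the same surface-area-to-volume ratio as the particle,
# Dsv = 6*V/SA (a sphere of diameter D has SA/V = 6/D); hc2 then solves the
# 1-D transient conduction problem on that equivalent sphere.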
# Parameters
# -----------------------------------------------------------------------------
Gb = 0.72 # basic specific gravity, Wood Handbook Table 4-7, (-)
k = 0.16 # thermal conductivity, W/mK
x = 0 # moisture content, %
h = 350 # heat transfer coefficient, W/m^2*K
Ti = 293 # initial particle temp, K
Tinf = 773 # ambient temp, K
# Comsol Data for Particle Geometry and Temperatures
# -----------------------------------------------------------------------------
# geometry and temperature data for DF = 200 um
sa200 = 5.355e-8 # surface area of Comsol particle, m^2
v200 = 8.895e-13 # volume of Comsol particle, m^3
file200 = 'comsol/200tempsOak.txt' # time and temperatures
t200, Tv200, _, _, _, _, _ = np.loadtxt(file200, skiprows=5, unpack=True)
# geometry and temperature data for DF = 400 um
sa400 = 1.879e-7 # surface area of Comsol particle, m^2
v400 = 5.553e-12 # volume of Comsol particle, m^3
file400 = 'comsol/400tempsOak.txt' # time and temperatures
t400, Tv400, _, _, _, _, _ = np.loadtxt(file400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 700 um
sa700 = 4.836e-7 # surface area of Comsol particle, m^2
v700 = 2.11e-11 # volume of Comsol particle, m^3
file700 = 'comsol/700tempsOak.txt' # time and temperatures
t700, Tv700, _, _, _, _, _ = np.loadtxt(file700, skiprows=5, unpack=True)
# geometry and temperature data for DF = 1400 um
sa1400 = 1.394e-6 # surface area of Comsol particle, m^2
v1400 = 8.442e-11 # volume of Comsol particle, m^3
file1400 = 'comsol/1400tempsOak.txt' # time and temperatures
t1400, Tv1400, _, _, _, _, _ = np.loadtxt(file1400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 2800 um
sa2800 = 4.614e-6 # surface area of Comsol particle, m^2
v2800 = 4.011e-10 # volume of Comsol particle, m^3
file2800 = 'comsol/2800tempsOak.txt' # time and temperatures
t2800, Tv2800, _, _, _, _, _ = np.loadtxt(file2800, skiprows=5, unpack=True)
# geometry and temperature data for DF = 5400 um
sa5400 = 1.716e-5 # surface area of Comsol particle, m^2
v5400 = 2.877e-9 # volume of Comsol particle, m^3
file5400 = 'comsol/5400tempsOak.txt' # time and temperatures
t5400, Tv5400, _, _, _, _, _ = np.loadtxt(file5400, skiprows=5, unpack=True)
# geometry and temperature data for DF = 10000 um
sa10000 = 5.885e-5 # surface area of Comsol particle, m^2
v10000 = 1.827e-8 # volume of Comsol particle, m^3
file10000 = 'comsol/10000tempsOak.txt' # time and temperatures
t10000, Tv10000, _, _, _, _, _ = np.loadtxt(file10000, skiprows=5, unpack=True)
# geometry and temperature data for DF = 20000 um
sa20000 = 2.354e-4 # surface area of Comsol particle, m^2
v20000 = 1.462e-7 # volume of Comsol particle, m^3
file20000 = 'comsol/20000tempsOak.txt' # time and temperatures
t20000, Tv20000, _, _, _, _, _ = np.loadtxt(file20000, skiprows=5, unpack=True)
# 1-D Transient Heat Conduction using Dsv
# -----------------------------------------------------------------------------
# number of nodes from center of particle (m=0) to surface (m)
m = 1000
# time vector from 0 to max time
tmax = 2.0 # max time, s
nt = 1000 # number of time steps
dt = tmax/nt # time step, s
t = np.arange(0, tmax+dt, dt) # time vector, s
tmax2 = 20.0 # max time for large particles, s
t2 = np.arange(0, tmax2+dt, dt) # time vector for large particles, s
# 1-D Transient Heat Conduction for DF = 200 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv200 = dsv(sa200, v200)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv200 = hc2(dsv200, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol200 = vol(dsv200, m) # volumes in the sphere
Tvol200 = Tvol(Tsv200, vol200) # Dsv volume average temperature profile
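# Note: Tvol is assumed to return the volume-weighted average temperature at
# each time step, Tv(t) = sum_i V_i*T_i(t) / sum_i V_i, where the V_i are the
# nodal shell volumes returned by vol(); this is what is compared against the
# Comsol volume-average temperatures.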
# 1-D Transient Heat Conduction for DF = 400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv400 = dsv(sa400, v400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv400 = hc2(dsv400, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol400 = vol(dsv400, m) # volumes in the sphere
Tvol400 = Tvol(Tsv400, vol400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 700 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv700 = dsv(sa700, v700)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv700 = hc2(dsv700, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol700 = vol(dsv700, m) # volumes in the sphere
Tvol700 = Tvol(Tsv700, vol700) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 1400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv1400 = dsv(sa1400, v1400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv1400 = hc2(dsv1400, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol1400 = vol(dsv1400, m) # volumes in the sphere
Tvol1400 = Tvol(Tsv1400, vol1400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 2800 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv2800 = dsv(sa2800, v2800)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv2800 = hc2(dsv2800, x, k, Gb, h, Ti, Tinf, 2, m, t)
# volume average temperature at each time step
vol2800 = vol(dsv2800, m) # volumes in the sphere
Tvol2800 = Tvol(Tsv2800, vol2800) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 5400 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv5400 = dsv(sa5400, v5400)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv5400 = hc2(dsv5400, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol5400 = vol(dsv5400, m) # volumes in the sphere
Tvol5400 = Tvol(Tsv5400, vol5400) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 10000 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv10000 = dsv(sa10000, v10000)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv10000 = hc2(dsv10000, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol10000 = vol(dsv10000, m) # volumes in the sphere
Tvol10000 = Tvol(Tsv10000, vol10000) # Dsv volume average temperature profile
# 1-D Transient Heat Conduction for DF = 20000 um
# -----------------------------------------------------------------------------
# surface area to volume equivalent sphere diameter Dsv, m
dsv20000 = dsv(sa20000, v20000)
# intraparticle temperature array [T] in Kelvin for Dsv case, b = 2 for sphere
# row = time step, column = node point from 0 to m
Tsv20000 = hc2(dsv20000, x, k, Gb, h, Ti, Tinf, 2, m, t2)
# volume average temperature at each time step
vol20000 = vol(dsv20000, m) # volumes in the sphere
Tvol20000 = Tvol(Tsv20000, vol20000) # Dsv volume average temperature profile
# Plot Results
# -----------------------------------------------------------------------------
py.ion()
py.close('all')
def despine():
ax = py.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
py.tick_params(axis='both', bottom='off', top='off', left='off', right='off')
py.figure(1)
py.plot(t200, Tv200, 'co', mec='c', mew=2, mfc='none', label='Tv')
py.plot(t400, Tv400, 'co', mec='c', mew=2, mfc='none')
py.plot(t700, Tv700, 'co', mec='c', mew=2, mfc='none')
py.plot(t1400, Tv1400, 'co', mec='c', mew=2, mfc='none')
py.plot(t2800, Tv2800, 'co', mec='c', mew=2, mfc='none')
py.plot(t, Tvol200, 'r', lw=2, label='0.2 mm')
py.plot(t, Tvol400, 'g', lw=2, label ='0.4 mm')
py.plot(t, Tvol700, 'b', lw=2, label='0.7 mm')
py.plot(t, Tvol1400, 'm', lw=2, label='1.4 mm')
py.plot(t, Tvol2800, 'y', lw=2, label='2.8 mm')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax)
py.title('White Oak with DF = 200-2800 um')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
py.figure(2)
py.plot(t5400, Tv5400, 'co', mec='c', mew=2, mfc='none', label='Tv')
py.plot(t10000, Tv10000, 'co', mec='c', mew=2, mfc='none')
py.plot(t20000, Tv20000, 'co', mec='c', mew=2, mfc='none')
py.plot(t2, Tvol5400, lw=2, label ='5.4 mm')
py.plot(t2, Tvol10000, lw=2, label='10 mm')
py.plot(t2, Tvol20000, lw=2, label='20 mm')
py.axhline(Tinf, c='k', ls='--')
py.ylim(250, 800)
py.xlim(0, tmax2)
py.title('White Oak with DF = 5.4-20 mm')
py.ylabel('Temperature (K)')
py.xlabel('Time (s)')
py.legend(loc='best', numpoints=1, frameon=False)
py.grid()
despine()
| mit |
stefanv/scipy3 | scipy/io/examples/read_array_demo1.py | 2 | 1440 | #=========================================================================
# NAME: read_array_demo1
#
# DESCRIPTION: Examples to read 2 columns from a multicolumn ascii text
# file, skipping the first line of header. First example reads into
# 2 separate arrays. Second example reads into a single array. Data are
# then plotted.
#
# Here is the format of the file test.txt:
# --------
# Some header to skip
# 1 2 3
# 2 4 6
# 3 6 9
# 4 8 12
#
# USAGE:
# python read_array_demo1.py
#
# PARAMETERS:
#
# DEPENDENCIES:
# matplotlib (pylab)
# test.txt
#
#
# AUTHOR: Simon J. Hook
# DATE : 09/23/2005
#
# MODIFICATION HISTORY:
#
# COMMENT:
#
#============================================================================
from scipy import *
from scipy.io import read_array
from pylab import *
def main():
# First example, read first and second column from ascii file. Skip first
# line of header.
# Note use of (1,-1) in lines to skip first line and then read to end of file
# Note use of (0,) in columns to pick first column, since its a tuple need trailing comma
x=read_array("test.txt",lines=(1,-1), columns=(0,))
y=read_array("test.txt",lines=(1,-1), columns=(1,))
    #Second example, read the file into a single array
z=read_array("test.txt",lines=(1,-1), columns=(0,2))
# Plot the data
plot(x,y,'r--',z[:,0],z[:,1])
show()
# The one and only main function
if __name__ == "__main__":
main()
| bsd-3-clause |
Chuban/moose | modules/stochastic_tools/python/stochastic/histogram.py | 4 | 2840 | #!/usr/bin/env python
import os
import argparse
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy import stats
import mooseutils
def command_line_options():
"""
Command-line options for histogram tool.
"""
parser = argparse.ArgumentParser(description="Command-line utility for creating histograms from VectorPostprocessor data.")
    parser.add_argument('filename', type=str, help="The VectorPostprocessor data file pattern to open, for example 'foo_x_*.csv'.")
parser.add_argument('-t', '--timesteps', default=[-1], nargs='+', type=int, help="List of timesteps to consider, by default only the final timestep is shown.")
parser.add_argument('-v', '--vectors', default=[], nargs='+', type=str, help="List of vector names to consider, by default all vectors are shown.")
parser.add_argument('--bins', default=None, type=int, help="Number of bins to consider.")
parser.add_argument('--alpha', default=0.5, type=float, help="Set the bar chart opacity alpha setting.")
parser.add_argument('--xlabel', default='Value', type=str, help="The X-axis label.")
    parser.add_argument('--ylabel', default='Probability', type=str, help="The Y-axis label.")
parser.add_argument('--uniform', default=None, type=float, nargs=2, help="Show a uniform distribution between a and b (e.g., --uniform 8 10).")
    parser.add_argument('--weibull', default=None, type=float, nargs=2, help="Show a Weibull distribution with given shape and scale parameters (e.g., --weibull 1 5).")
return parser.parse_args()
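# Example invocation (illustrative; the file name pattern is a placeholder).
# Quote the pattern so the shell does not expand it, e.g.
#   python histogram.py 'foo_x_*.csv' --bins 40 --uniform 8 10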
if __name__ == '__main__':
# Command-line options
opt = command_line_options()
opt.filename = os.path.abspath(opt.filename)
# Read data and set the default vector names
data = mooseutils.VectorPostprocessorReader(opt.filename)
if not opt.vectors:
opt.vectors = data.variables()[1:] # 1: ignores peacock index
# Plot the results
times = data.times()
for t in [times[idx] for idx in opt.timesteps]:
for vec in opt.vectors:
plt.hist(data[vec][t], bins=opt.bins, normed=True, alpha=opt.alpha,
label="{} (t={})".format(vec, t), ec='black')
# Add distributions
if opt.uniform:
loc = opt.uniform[0]
scale = opt.uniform[1] - loc
x = np.linspace(loc, loc + scale, 100)
plt.plot(x, stats.uniform.pdf(x, loc=loc, scale=scale), label='Uniform Exact')
if opt.weibull:
shape = opt.weibull[0]
scale = opt.weibull[1]
xlim = plt.gca().get_xlim()
x = np.linspace(xlim[0], xlim[1], 100)
plt.plot(x, stats.weibull_min.pdf(x, shape, scale=scale), label='Weibull Exact')
# Setup the axis and show the plot
plt.xlabel(opt.xlabel)
plt.ylabel(opt.ylabel)
plt.grid(True)
plt.legend()
plt.show()
| lgpl-2.1 |
Grievoushead/TwitterPy-DataMining | analyze_tweets.py | 1 | 5183 | '''
Author: Adil Moujahid
Description: Script for analyzing tweets to compare the popularity of 3 programming languages: Python, JavaScript and Ruby
Reference: http://adilmoujahid.com/posts/2014/07/twitter-analytics/
'''
import json
import pandas as pd
import matplotlib.pyplot as plt
import re
def word_in_text(word, text):
word = word.lower()
text = text.lower()
match = re.search(word, text)
if match:
return True
return False
def extract_link(text):
regex = r'https?://[^\s<>"]+|www\.[^\s<>"]+'
match = re.search(regex, text)
if match:
return match.group()
return ''
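# For example, extract_link("new tutorial at http://example.com/py?x=1 #python")
# would return "http://example.com/py?x=1", while tweets without a URL yield ''.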
def main():
#Reading Tweets
print 'Reading Tweets\n'
tweets_data_path = 'twitter_data.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
#Structuring Tweets
print 'Structuring Tweets\n'
tweets = pd.DataFrame()
tweets['text'] = map(lambda tweet: tweet['text'], tweets_data)
tweets['lang'] = map(lambda tweet: tweet['lang'], tweets_data)
    tweets['country'] = map(lambda tweet: tweet['place']['country'] if tweet['place'] is not None else None, tweets_data)
#Analyzing Tweets by Language
print 'Analyzing tweets by language\n'
tweets_by_lang = tweets['lang'].value_counts()
fig, ax = plt.subplots()
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=10)
ax.set_xlabel('Languages', fontsize=15)
ax.set_ylabel('Number of tweets' , fontsize=15)
ax.set_title('Top 5 languages', fontsize=15, fontweight='bold')
tweets_by_lang[:5].plot(ax=ax, kind='bar', color='red')
plt.savefig('tweet_by_lang', format='png')
#Analyzing Tweets by Country
# print 'Analyzing tweets by country\n'
# tweets_by_country = tweets['country'].value_counts()
# fig, ax = plt.subplots()
# ax.tick_params(axis='x', labelsize=15)
# ax.tick_params(axis='y', labelsize=10)
# ax.set_xlabel('Countries', fontsize=15)
# ax.set_ylabel('Number of tweets' , fontsize=15)
# ax.set_title('Top 5 countries', fontsize=15, fontweight='bold')
# tweets_by_country[:5].plot(ax=ax, kind='bar', color='blue')
# plt.savefig('tweet_by_country', format='png')
#Adding programming languages columns to the tweets DataFrame
print 'Adding programming languages tags to the data\n'
tweets['python'] = tweets['text'].apply(lambda tweet: word_in_text('python', tweet))
tweets['javascript'] = tweets['text'].apply(lambda tweet: word_in_text('javascript', tweet))
tweets['ruby'] = tweets['text'].apply(lambda tweet: word_in_text('ruby', tweet))
#Analyzing Tweets by programming language: First attempt
print 'Analyzing tweets by programming language: First attempt\n'
prg_langs = ['python', 'javascript', 'ruby']
tweets_by_prg_lang = [tweets['python'].value_counts()[True], tweets['javascript'].value_counts()[True], tweets['ruby'].value_counts()[True]]
x_pos = list(range(len(prg_langs)))
width = 0.8
fig, ax = plt.subplots()
plt.bar(x_pos, tweets_by_prg_lang, width, alpha=1, color='g')
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Ranking: python vs. javascript vs. ruby (Raw data)', fontsize=10, fontweight='bold')
ax.set_xticks([p + 0.4 * width for p in x_pos])
ax.set_xticklabels(prg_langs)
plt.grid()
plt.savefig('tweet_by_prg_language_1', format='png')
#Targeting relevant tweets
print 'Targeting relevant tweets\n'
tweets['programming'] = tweets['text'].apply(lambda tweet: word_in_text('programming', tweet))
tweets['tutorial'] = tweets['text'].apply(lambda tweet: word_in_text('tutorial', tweet))
tweets['relevant'] = tweets['text'].apply(lambda tweet: word_in_text('programming', tweet) or word_in_text('tutorial', tweet))
#Analyzing Tweets by programming language: Second attempt
print 'Analyzing tweets by programming language: Second attempt\n'
tweets_by_prg_lang = [tweets[tweets['relevant'] == True]['python'].value_counts()[True],
tweets[tweets['relevant'] == True]['javascript'].value_counts()[True],
tweets[tweets['relevant'] == True]['ruby'].value_counts()[True]]
x_pos = list(range(len(prg_langs)))
width = 0.8
fig, ax = plt.subplots()
plt.bar(x_pos, tweets_by_prg_lang, width,alpha=1,color='g')
ax.set_ylabel('Number of tweets', fontsize=15)
ax.set_title('Ranking: python vs. javascript vs. ruby (Relevant data)', fontsize=10, fontweight='bold')
ax.set_xticks([p + 0.4 * width for p in x_pos])
ax.set_xticklabels(prg_langs)
plt.grid()
plt.savefig('tweet_by_prg_language_2', format='png')
#Extracting Links
tweets['link'] = tweets['text'].apply(lambda tweet: extract_link(tweet))
tweets_relevant = tweets[tweets['relevant'] == True]
tweets_relevant_with_link = tweets_relevant[tweets_relevant['link'] != '']
print '\nBelow are some Python links that we extracted\n'
print tweets_relevant_with_link[tweets_relevant_with_link['python'] == True]['link'].head()
print tweets_relevant_with_link[tweets_relevant_with_link['javascript'] == True]['link']
print tweets_relevant_with_link[tweets_relevant_with_link['ruby'] == True]['link']
if __name__=='__main__':
main()
| mit |
lazywei/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
vrv/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
JustinNoel1/ML-Course | linear-regression/python/polyreg.py | 1 | 3187 | # This file implements polynomial regression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.metrics import mean_squared_error
#Set number of samples and seed
NUM_SAMPLES = 100
np.random.seed(42)
# Our `True' function
def f(x):
return 7 *(x**3 -1.3 *x**2+0.5*x - 0.056)
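# Background: PolynomialFeatures(degree=d) expands a single input x into the
# feature vector [1, x, x**2, ..., x**d], so LinearRegression on the expanded
# features amounts to fitting a degree-d polynomial.  For instance,
# PolynomialFeatures(degree=3).fit_transform([[2.0]]) gives [[1., 2., 4., 8.]].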
# initialize sample data
data = np.array([[x,f(x) ] for x in np.random.random(NUM_SAMPLES)])
# grid of coordinates for true function
gridx = np.linspace(0, 1, NUM_SAMPLES)
gridy = np.array([f(x) for x in gridx])
datax = data[:,0]
normaly = data[:,1]+0.3*np.random.randn(NUM_SAMPLES)
#Plot sampled data points
plt.scatter(datax, normaly )
plt.title("Scatter plot of synthetic data with normal errors")
plt.plot(gridx, gridy, label = "True function", color = 'Red')
plt.legend(loc = 2)
plt.savefig("poly_scatter_normal.png")
plt.cla()
gen_poly = True
# Run polynomial regression repeatedly for increasing degrees
if gen_poly:
lm = LinearRegression()
for deg in range(1, 8):
poly = PolynomialFeatures(degree = deg)
newdatax = poly.fit_transform(datax.reshape(NUM_SAMPLES,1))
for i in range(1, NUM_SAMPLES+1):
lm.fit(newdatax[:i], normaly[:i].reshape(i, 1))
predictions = lm.predict(newdatax)
mse = mean_squared_error(predictions, normaly.reshape(NUM_SAMPLES,1))
#Plot everything
plt.ylim(-0.75, 1.25)
plt.scatter(datax, normaly)
plt.title("Degree {} polynomial regression on {} points with normal error".format(deg, i))
plt.plot(gridx, gridy, label = "True function", color = 'Red')
gridpred = lm.predict(poly.fit_transform(gridx.reshape(NUM_SAMPLES, 1)))
plt.plot(gridx.flatten(), gridpred.flatten(), label = "Polynomial regressor curve MSE = {:0.4f}".format(mse), color = 'Green')
plt.legend(loc = 2)
plt.savefig("polyreg_normal_{:02d}{:03d}.png".format(deg,i))
plt.cla()
# Run degree 10 polynomial regression repeatedly using a random sample of 30 points
gen_var = True
if gen_var:
lm = LinearRegression()
poly = PolynomialFeatures(degree = 10)
newdatax = poly.fit_transform(datax.reshape(NUM_SAMPLES,1))
for i in range(30):
samp = np.random.choice(range(NUM_SAMPLES), 30)
lm.fit(newdatax[samp], normaly[samp].reshape(30, 1))
predictions = lm.predict(newdatax)
mse = mean_squared_error(predictions, normaly.reshape(NUM_SAMPLES,1))
#Plot everything
plt.ylim(-0.75, 1.25)
plt.scatter(datax, normaly)
plt.title("Degree {} polynomial regression on 30 random points with normal error".format(10))
plt.plot(gridx, gridy, label = "True function", color = 'Red')
gridpred = lm.predict(poly.fit_transform(gridx.reshape(NUM_SAMPLES, 1)))
plt.plot(gridx.flatten(), gridpred.flatten(), label = "Polynomial regressor curve MSE = {:0.4f}".format(mse), color = 'Green')
plt.legend(loc = 2)
plt.savefig("polyreg_var_{:03d}.png".format(i))
plt.cla()
| apache-2.0 |
xavierwu/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
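# Note: with hinge loss and an l2 penalty, SGDClassifier is effectively a
# linear SVM trained by stochastic gradient descent.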
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
MSeifert04/astropy | astropy/visualization/wcsaxes/patches.py | 4 | 3408 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['SphericalCircle']
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
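# Illustrative usage (a sketch only -- `ax` is assumed to be a WCSAxes instance
# and the keyword arguments are ordinary matplotlib Polygon options):
#     circle = SphericalCircle((30. * u.deg, 60. * u.deg), 10. * u.deg,
#                              edgecolor='white', facecolor='none',
#                              transform=ax.get_transform('fk5'))
#     ax.add_patch(circle)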
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
    coordinates on a sphere. Here we assume that latitude goes from -90 to +90 degrees.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity`
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity`
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/tree/export.py | 35 | 16873 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
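# For example, _color_brew(3) yields three [R, G, B] lists (integer values in
# the 0-255 range) whose hues are spaced 120 degrees apart; export_graphviz
# uses one such colour per class (or per output) when filling nodes.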
class Sentinel(object):
    def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
        returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
| mit |
raghavrv/scikit-learn | sklearn/__init__.py | 7 | 5148 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
import os
from contextlib import contextmanager as _contextmanager
_ASSUME_FINITE = bool(os.environ.get('SKLEARN_ASSUME_FINITE', False))
def get_config():
"""Retrieve current values for configuration set by :func:`set_config`
Returns
-------
config : dict
Keys are parameter names that can be passed to :func:`set_config`.
"""
return {'assume_finite': _ASSUME_FINITE}
def set_config(assume_finite=None):
"""Set global scikit-learn configuration
Parameters
----------
assume_finite : bool, optional
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding errors.
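Examples
--------
A minimal illustration, using only the helpers defined in this module:
>>> import sklearn
>>> sklearn.set_config(assume_finite=True)
>>> sklearn.get_config()['assume_finite']
True
>>> sklearn.set_config(assume_finite=False)  # restore the module default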
"""
global _ASSUME_FINITE
if assume_finite is not None:
_ASSUME_FINITE = assume_finite
@_contextmanager
def config_context(**new_config):
"""Context manager for global scikit-learn configuration
Parameters
----------
assume_finite : bool, optional
If True, validation for finiteness will be skipped,
saving time, but leading to potential crashes. If
False, validation for finiteness will be performed,
avoiding errors.
Notes
-----
All settings, not just those presently modified, will be returned to
their previous values when the context manager is exited. This is not
thread-safe.
Examples
--------
>>> import sklearn
>>> from sklearn.utils.validation import assert_all_finite
>>> with sklearn.config_context(assume_finite=True):
... assert_all_finite([float('nan')])
>>> with sklearn.config_context(assume_finite=True):
... with sklearn.config_context(assume_finite=False):
... assert_all_finite([float('nan')])
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Input contains NaN, ...
"""
old_config = get_config().copy()
set_config(**new_config)
try:
yield
finally:
set_config(**old_config)
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module=r'^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.19.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It is used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
hammerlab/varcode | test/test_cli_genes.py | 1 | 1065 | from varcode.cli.genes_script import main as run_script
from .data import ov_wustle_variants, db_snp_variants
from tempfile import NamedTemporaryFile
import pandas as pd
def test_varcode_effects_script():
"""
Load a variant collection which combines the ovarian cancer test VCF
and a small number of variants from dbSNP
"""
commandline_args = ["--genome", "grch37"]
commandline_args.extend(["--maf", ov_wustle_variants.path])
for variant in db_snp_variants:
commandline_args.append("--variant")
commandline_args.append(str(variant.contig))
commandline_args.append(str(variant.start))
commandline_args.append(str(variant.original_ref))
commandline_args.append(str(variant.original_alt))
with NamedTemporaryFile(mode="r+", delete=True) as f:
commandline_args.extend(["--output-csv", f.name])
run_script(commandline_args)
f.flush()
combined_variants = pd.read_csv(f.name)
assert len(combined_variants) == (len(ov_wustle_variants) + len(db_snp_variants))
| apache-2.0 |
Windy-Ground/scikit-learn | sklearn/cluster/tests/test_k_means.py | 63 | 26190 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the handling of sparse, non-centered data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of cluster centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that an error is raised if the precompute_distances flag gets an
# unsupported value
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check that k_means with a bad initialization does not yield a singleton.
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn would make the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the label assignment is perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e464.py | 2 | 6820 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer,
DropoutLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000
N_SEQ_PER_BATCH = 64
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
window=("2013-03-18", None),
train_buildings=[1],
validation_buildings=[1],
n_seq_per_batch=N_SEQ_PER_BATCH,
standardise_input=True,
standardise_targets=True,
independently_center_inputs=True,
ignore_incomplete=True,
offset_probability=0.5,
ignore_offset_activations=True
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy.update(dict(
target_appliance=target_appliance,
logger=logging.getLogger(name),
seq_length=seq_length
))
source = SameLocation(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
NUM_FILTERS = 4
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'label': 'conv0',
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'label': 'dense0',
'type': DenseLayer,
'num_units': (seq_length - 3),
'nonlinearity': rectify
},
{
'type': DropoutLayer,
'p': 0.5
},
{
'label': 'dense2',
'type': DenseLayer,
'num_units': 16,
'nonlinearity': rectify
},
{
'type': DropoutLayer,
'p': 0.5
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': DeConv1DLayer,
'num_output_channels': 1,
'filter_length': 4,
'stride': 1,
'nonlinearity': None,
'border_mode': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
net = Net(**net_dict_copy)
return net
def main():
APPLIANCES = [
('a', 'fridge freezer', 800),
('b', 'coffee maker', 512),
('c', 'dish washer', 2000),
('d', 'hair dryer', 256),
('e', 'kettle', 256),
('f', 'oven', 2000),
('g', 'toaster', 256),
('h', 'light', 2000),
('i', 'washer dryer', 2000)
]
for experiment, appliance, seq_length in APPLIANCES[:1]:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, 'a', full_exp_name)
func_call = func_call[:-1] + ", '{}', {})".format(appliance, seq_length)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=20000)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source
del net
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e464.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
NullScope/BorderlessStone | setup.py | 6 | 6544 | #! /usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import stat
from setuptools import setup, find_packages
from distutils.command.build_py import build_py
from distutils.command.sdist import sdist
from PyInstaller import get_version
import PyInstaller.utils.git
DESC = ('Converts (packages) Python programs into stand-alone executables, '
'under Windows, Linux, Mac OS X, AIX and Solaris.')
LONG_DESC = """
PyInstaller is a program that converts (packages) Python
programs into stand-alone executables, under Windows, Linux, Mac OS X,
AIX and Solaris. Its main advantages over similar tools are that
PyInstaller works with any version of Python since 2.3, it builds smaller
executables thanks to transparent compression, it is fully multi-platform,
and uses the OS support to load the dynamic libraries, thus ensuring full
compatibility.
The main goal of PyInstaller is to be compatible with 3rd-party packages
out-of-the-box. This means that, with PyInstaller, all the required tricks
to make external packages work are already integrated within PyInstaller
itself so that there is no user intervention required. You'll never be
required to look for tricks in wikis and apply custom modification to your
files or your setup scripts. As an example, libraries like PyQt, Django or
matplotlib are fully supported, without having to handle plugins or
external data files manually.
"""
CLASSIFIERS = """
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Other Audience
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: GNU General Public License v2 (GPLv2)
Classifier: Natural Language :: English
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: POSIX
Classifier: Operating System :: POSIX :: AIX
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: POSIX :: SunOS/Solaris
Classifier: Programming Language :: C
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.4
Classifier: Programming Language :: Python :: 2.5
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 2 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development
Classifier: Topic :: Software Development :: Build Tools
Classifier: Topic :: System :: Installation/Setup
Classifier: Topic :: System :: Software Distribution
Classifier: Topic :: Utilities
""".splitlines()
# Make the distribution files always report the git-revision used
# when building the distribution packages. This is done by replacing
# PyInstaller/utils/git.py within the dist/build by a fake-module
# which always returns the current git-revision. The original
# source-file is unchanged.
#
# This has to be done in 'build_py' for bdist-commands and in 'sdist'
# for sdist-commands.
def _write_git_version_file(filename):
"""
Fake PyInstaller.utils.git.py to always return the current revision.
"""
git_version = PyInstaller.utils.git.get_repo_revision()
st = os.stat(filename)
# remove the file first for the case it's hard-linked to the
# original file
os.remove(filename)
git_mod = open(filename, 'w')
template = "def get_repo_revision(): return %r"
try:
git_mod.write(template % git_version)
finally:
git_mod.close()
os.chmod(filename, stat.S_IMODE(st.st_mode))
class my_build_py(build_py):
def build_module(self, module, module_file, package):
res = build_py.build_module(self, module, module_file, package)
if module == 'git' and package == 'PyInstaller.utils':
filename = self.get_module_outfile(
self.build_lib, package.split('.'), module)
_write_git_version_file(filename)
return res
class my_sdist(sdist):
def make_release_tree(self, base_dir, files):
res = sdist.make_release_tree(self, base_dir, files)
build_py = self.get_finalized_command('build_py')
filename = build_py.get_module_outfile(
base_dir, ['PyInstaller', 'utils'], 'git')
_write_git_version_file(filename)
return res
setup(
install_requires=['distribute'],
name='PyInstaller',
version=get_version(),
description=DESC,
long_description=LONG_DESC,
keywords='packaging, standalone executable, pyinstaller, macholib, freeze, py2exe, py2app, bbfreeze',
author='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
author_email='[email protected]',
maintainer='Giovanni Bajo, Hartmut Goebel, Martin Zibricky',
maintainer_email='[email protected]',
license=('GPL license with a special exception which allows to use '
'PyInstaller to build and distribute non-free programs '
'(including commercial ones)'),
url='http://www.pyinstaller.org',
download_url='https://sourceforge.net/projects/pyinstaller/files',
classifiers=CLASSIFIERS,
zip_safe=False,
packages=find_packages(),
package_data={
# This includes precompiled bootloaders.
'PyInstaller': ['bootloader/*/*'],
# This file is necessary for rthooks (runtime hooks).
'PyInstaller.loader': ['rthooks.dat'],
},
include_package_data=True,
cmdclass = {
'sdist': my_sdist,
'build_py': my_build_py,
},
entry_points="""
[console_scripts]
pyinstaller=PyInstaller.main:run
pyi-archive_viewer=PyInstaller.cliutils.archive_viewer:run
pyi-bindepend=PyInstaller.cliutils.bindepend:run
pyi-build=PyInstaller.cliutils.build:run
pyi-grab_version=PyInstaller.cliutils.grab_version:run
pyi-make_comserver=PyInstaller.cliutils.make_comserver:run
pyi-makespec=PyInstaller.cliutils.makespec:run
pyi-set_version=PyInstaller.cliutils.set_version:run
"""
)
| lgpl-3.0 |
liberatorqjw/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
Jakob37/RASP | Scripts/UtilScripts/plot_time_log.py | 1 | 4926 | #!/usr/bin/env python3
"""
RASP: Rapid Amplicon Sequence Pipeline
Copyright (C) 2016, Jakob Willforss and Björn Canbäck
All rights reserved.
This file is part of RASP.
RASP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RASP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with RASP. If not, see <http://www.gnu.org/licenses/>.
"""
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import cm
import argparse
import numpy as np
program_description = """
Plots time elapsed in different parts of the pipeline
Displays only those processes that take up more than a certain
percentage of the total execution time (determined by FILTER_THRESHOLD)
"""
COLOR_SCALE = cm.gist_earth
FILTER_THRESHOLD = 0.01
BAR_WIDTH = 0.7
LABEL_FONT_SIZE = 16
TITLE_FONT_SIZE = 20
def main():
args = parse_arguments()
input_fh = open(args.input, 'r')
title, xlabel, legend_label, ylabel, datapoints = extract_data(input_fh)
time_col = 1
tot_time = sum([float(dp[time_col]) for dp in datapoints])
title += ' Total time: {} seconds'.format(int(tot_time))
filter_short_prog_runs(datapoints, tot_time, FILTER_THRESHOLD)
x_values, y_values, point_labels = get_expanded_datapoints(datapoints)
fig, leg = bar_plot(x_values, y_values, point_labels, title, 'Run time (seconds)')
fig.savefig(args.output, format='PDF', bbox_extra_artists=(leg,), bbox_inches='tight')
input_fh.close()
def get_expanded_datapoints(datapoints):
"""
Takes a list of datapoints (triple tuples)
and returns the sub-values as three separate lists
"""
x_values = [d_point[0] for d_point in datapoints]
y_values = [float(d_point[1]) for d_point in datapoints]
point_labels = [d_point[2] for d_point in datapoints]
return x_values, y_values, point_labels
def filter_short_prog_runs(datapoints, total_time, filter_threshold_percentage):
"""
Removes datapoints for programs for which the running time is lower than
a certain percentage of the total running time
"""
for pos in range(len(datapoints) - 1, -1, -1):
if float(datapoints[pos][1]) < filter_threshold_percentage * total_time:
del datapoints[pos]
def parse_arguments():
"""Parses the command line arguments"""
parser = argparse.ArgumentParser(description=program_description)
parser.add_argument('-i', '--input', help='The input file', required=True)
parser.add_argument('-o', '--output', help='Path of the output plot (PDF)', required=True)
args = parser.parse_args()
return args
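# Typical invocation of this script (file names are illustrative only; any
# readable time log and writable PDF path will do):
#
#   ./plot_time_log.py --input time_log.txt --output time_plot.pdf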
def extract_data(input_fh):
"""
Retrieves x, y and label data from given textfile
Returns labels and a list with data-point tuples (x, y, short name)
"""
datapoints = []
line_nbr = 1
for line in input_fh:
line = line.rstrip()
if line_nbr == 1:
title = line
elif line_nbr == 2:
xlabel, legend_label, ylabel = line.split('\t')
else:
x, y, acr = line.split('\t')
datapoints.append((x, y, acr))
line_nbr += 1
return title, xlabel, legend_label, ylabel, datapoints
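# Sketch of the input layout implied by extract_data() above; the example
# values are illustrative assumptions, not taken from a real RASP time log:
#
#   <title>
#   <xlabel>TAB<legend label>TAB<ylabel>
#   <program name>TAB<run time in seconds>TAB<short label>   (one line per program)
#
# For example:
#
#   RASP running times
#   Program\tDescription\tRun time (seconds)
#   Prinseq filtering\t42.1\tPS
#   Vsearch clustering\t13.7\tVS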
def bar_plot(xvalues, yvalues, shorts, title, ylab):
"""Takes data and labels, creates and returns plot and legend"""
fig = pyplot.figure()
ax = pyplot.subplot(111)
remove_axes(ax)
ind = np.arange(len(xvalues))
barlist = pyplot.bar(ind, yvalues, align='center', width=BAR_WIDTH)
color_bars(barlist)
pyplot.ylabel(ylab, fontsize=LABEL_FONT_SIZE)
pyplot.xticks(ind, shorts, fontsize=LABEL_FONT_SIZE)
pyplot.title(title, fontsize=TITLE_FONT_SIZE)
leg = pyplot.legend(barlist, xvalues, loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol=2)
return fig, leg
def remove_axes(ax):
"""Removes upper and right axes from axis object"""
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return ax
def color_bars(barlist):
"""Colors a list with plotted bar-objects"""
for bar_nbr in range(len(barlist)):
color = get_color(len(barlist), bar_nbr)
barlist[bar_nbr].set_facecolor(color)
def get_color(sample_count, nbr):
"""Calculates and returns color based on """
scale_color_count = 255
color_nbr = int((scale_color_count / (sample_count + 1)) * (nbr + 1))
return COLOR_SCALE(color_nbr)
if __name__ == '__main__':
main()
| gpl-3.0 |
elingg/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 18 | 3978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
    self.assertEqual(expected, result)


if __name__ == "__main__":
  test.main()
| apache-2.0 |
vene/ambra | scripts/semeval_reproduce_task2.py | 1 | 5151 | from __future__ import print_function
import sys
import json
import numpy as np
from scipy.stats import sem
from sklearn.base import clone
from sklearn.utils import shuffle
from sklearn.cross_validation import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import FeatureUnion
from sklearn.feature_selection import chi2
from sklearn.feature_extraction.text import TfidfVectorizer
from ambra.cross_validation import cross_val_score
from ambra.tools import PossiblePipeline, Proj, IntervalSelectKBest
from ambra.features import LengthFeatures, StylisticFeatures
from ambra.features import NgramLolAnalyzer
from ambra.interval_scoring import semeval_interval_scorer
from ambra.classifiers import IntervalLogisticRegression
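
# The path to the JSON corpus is passed as the only command-line argument.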
fname = sys.argv[1]
with open(fname) as f:
entries = json.load(f)
# some buggy docs are empty
entries = [entry for entry in entries if len(entry['lemmas'])]
X = np.array(entries)
Y = np.array([doc['interval'] for doc in entries])
Y_possible = np.array([doc['all_fine_intervals'] for doc in entries])
X, Y, Y_possible = shuffle(X, Y, Y_possible, random_state=0)
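
# Shuffle documents together with their gold intervals and the sets of
# acceptable intervals so that the three arrays stay aligned.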
print("Length features")
print("===============")
pipe = PossiblePipeline([('vect', Proj(LengthFeatures(), key='lemmas')),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('clf', IntervalLogisticRegression(C=0.0008030857221,
n_neighbors=10,
limit_pairs=1,
random_state=0))])
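
# Cross-validation uses the task's interval-based scorer, which needs the
# per-document sets of acceptable intervals passed through scorer_params.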
scores = cross_val_score(pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
print()

print("Stylistic features")
print("==================")
union = FeatureUnion([('lengths', Proj(LengthFeatures(), key='lemmas')),
('style', StylisticFeatures())])
pipe = PossiblePipeline([('vect', union),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('clf', IntervalLogisticRegression(C=0.02154434690032,
n_neighbors=10,
limit_pairs=1,
random_state=0))])
scores = cross_val_score(pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
print()

print("Full")
print("====")
vectorizer = TfidfVectorizer(use_idf=False, norm='l1',
analyzer=NgramLolAnalyzer(lower=False))
vectorizer_low = TfidfVectorizer(use_idf=False, norm='l1',
analyzer=NgramLolAnalyzer(lower=True))
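
# Two n-gram vectorizers: the one applied to POS tags keeps case (lower=False),
# while the one applied to tokens lowercases before counting.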
union = FeatureUnion([('lengths', Proj(LengthFeatures(), key='lemmas')),
('style', StylisticFeatures()),
('pos', Proj(clone(vectorizer), key='pos')),
('tokens', Proj(clone(vectorizer_low), key='tokens'))])
final_pipe = PossiblePipeline([('union', union),
('scale', StandardScaler(with_mean=False,
with_std=True)),
('fs', IntervalSelectKBest(chi2)),
('clf', IntervalLogisticRegression(
n_neighbors=10,
limit_pairs=0.01, # make larger if possible
random_state=0))])
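
# Hyper-parameters are set through the pipeline's nested "step__param" naming,
# reaching through the FeatureUnion into each projected transformer.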
final_pipe.set_params(**{'union__tokens__transf__min_df': 5,
'union__tokens__transf__max_df': 0.9,
'union__pos__transf__analyzer__ngram_range': (2, 2),
'union__pos__transf__max_df': 0.8,
'fs__k': 2000,
'union__pos__transf__min_df': 1,
'clf__C': 2.592943797404667e-05,
'union__tokens__transf__analyzer__ngram_range': (1, 1)}
)
scores = cross_val_score(final_pipe, X, Y, cv=KFold(len(X), n_folds=5),
scoring=semeval_interval_scorer,
scorer_params=dict(Y_possible=Y_possible),
n_jobs=4
)
print("{:.3f} +/- {:.4f}".format(scores.mean(), sem(scores)))
final_pipe.fit(X, Y)
feature_names = final_pipe.steps[0][1].get_feature_names()
feature_names = np.array(feature_names)[final_pipe.steps[2][1].get_support()]
coef = final_pipe.steps[-1][-1].coef_.ravel()
for idx in np.argsort(-np.abs(coef))[:100]:
print("{:.2f}\t{}".format(coef[idx], feature_names[idx]))
| bsd-2-clause |