repo_name | path | copies | size | content | license
---|---|---|---|---|---|
hhuuggoo/kitchensink | kitchensink/admin/__init__.py | 1 | 2237 | import logging
import sys
import time
from contextlib import contextmanager
import pandas as pd
from ..rpc import RPC
from ..taskqueue.objs import current_job_id
from .. import settings
from ..serialization import serializer, deserializer
logger = logging.getLogger(__name__)
def cancel_all():
keys = settings.redis_conn.keys("rq:job*")
if keys:
settings.redis_conn.delete(*keys)
def retrieve_profile(jids):
connection = settings.redis_conn
all_messages = []
for jid in jids:
key = "rq:profile:%s" % jid
msgs = connection.lrange(key, 0, -1)
if msgs:
connection.ltrim(key, len(msgs), -1)
big_message = {}
for msg in msgs:
msg = deserializer('cloudpickle')(msg)
big_message.update(msg)
all_messages.append(big_message)
# Guard against an empty message list before building the DataFrame.
if not all_messages:
return None
data = pd.DataFrame(all_messages)
start_spread = data.pop('start')
end_spread = data.pop('end')
runtimes = end_spread - start_spread
total_runtimes = runtimes.sum()
result = data.sum()
result['start_spread'] = start_spread.max() - start_spread.min()
result['end_spread'] = end_spread.max() - end_spread.min()
result['total_runtimes'] = total_runtimes
result['last_finish'] = end_spread.max()
return result
#from dabaez
def save_profile(key, value, jid):
connection = settings.redis_conn
msg = {key : value}
msg = serializer('cloudpickle')(msg)
key = "rq:profile:%s" % jid
connection.lpush(key, msg)
connection.expire(key, settings.profile_ttl)
def timethis(what, jid=None):
@contextmanager
def benchmark():
start = time.time()
yield
end = time.time()
if benchmark.jid is None:
jid = current_job_id()
else:
jid = benchmark.jid
if settings.is_server and settings.profile and jid:
save_profile(what, end-start, jid)
else:
print("%s : %0.3f seconds" % (what, end-start))
benchmark.jid = jid
return benchmark()
def make_rpc():
rpc = RPC()
rpc.register_function(cancel_all)
rpc.register_function(retrieve_profile)
return rpc
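# Illustrative usage sketch (not part of the original module): how `timethis`
# is meant to wrap a block of work. On a profiling-enabled server process with
# a current job id the measured duration is pushed to redis via `save_profile`;
# otherwise it is simply printed. The helper below exists only as an example
# and is not called anywhere in the package.
def _timethis_example():
    with timethis("example-step"):
        time.sleep(0.05)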
| bsd-3-clause |
anielsen001/scipy | scipy/integrate/_bvp.py | 61 | 39966 | """Boundary value problem solver."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm, pinv
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse.linalg import splu
from scipy.optimize import OptimizeResult
EPS = np.finfo(float).eps
def estimate_fun_jac(fun, x, y, p, f0=None):
"""Estimate derivatives of an ODE system rhs with forward differences.
Returns
-------
df_dy : ndarray, shape (n, n, m)
Derivatives with respect to y. An element (i, j, q) corresponds to
d f_i(x_q, y_q) / d (y_q)_j.
df_dp : ndarray with shape (n, k, m) or None
Derivatives with respect to p. An element (i, j, q) corresponds to
d f_i(x_q, y_q, p) / d p_j. If `p` is empty, None is returned.
"""
n, m = y.shape
if f0 is None:
f0 = fun(x, y, p)
dtype = y.dtype
df_dy = np.empty((n, n, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(y))
for i in range(n):
y_new = y.copy()
y_new[i] += h[i]
hi = y_new[i] - y[i]
f_new = fun(x, y_new, p)
df_dy[:, i, :] = (f_new - f0) / hi
k = p.shape[0]
if k == 0:
df_dp = None
else:
df_dp = np.empty((n, k, m), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(p))
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
f_new = fun(x, y, p_new)
df_dp[:, i, :] = (f_new - f0) / hi
return df_dy, df_dp
def estimate_bc_jac(bc, ya, yb, p, bc0=None):
"""Estimate derivatives of boundary conditions with forward differences.
Returns
-------
dbc_dya : ndarray, shape (n + k, n)
Derivatives with respect to ya. An element (i, j) corresponds to
d bc_i / d ya_j.
dbc_dyb : ndarray, shape (n + k, n)
Derivatives with respect to yb. An element (i, j) corresponds to
d bc_i / d yb_j.
dbc_dp : ndarray with shape (n + k, k) or None
Derivatives with respect to p. An element (i, j) corresponds to
d bc_i / d p_j. If `p` is empty, None is returned.
"""
n = ya.shape[0]
k = p.shape[0]
if bc0 is None:
bc0 = bc(ya, yb, p)
dtype = ya.dtype
dbc_dya = np.empty((n, n + k), dtype=dtype)
h = EPS**0.5 * (1 + np.abs(ya))
for i in range(n):
ya_new = ya.copy()
ya_new[i] += h[i]
hi = ya_new[i] - ya[i]
bc_new = bc(ya_new, yb, p)
dbc_dya[i] = (bc_new - bc0) / hi
dbc_dya = dbc_dya.T
h = EPS**0.5 * (1 + np.abs(yb))
dbc_dyb = np.empty((n, n + k), dtype=dtype)
for i in range(n):
yb_new = yb.copy()
yb_new[i] += h[i]
hi = yb_new[i] - yb[i]
bc_new = bc(ya, yb_new, p)
dbc_dyb[i] = (bc_new - bc0) / hi
dbc_dyb = dbc_dyb.T
if k == 0:
dbc_dp = None
else:
h = EPS**0.5 * (1 + np.abs(p))
dbc_dp = np.empty((k, n + k), dtype=dtype)
for i in range(k):
p_new = p.copy()
p_new[i] += h[i]
hi = p_new[i] - p[i]
bc_new = bc(ya, yb, p_new)
dbc_dp[i] = (bc_new - bc0) / hi
dbc_dp = dbc_dp.T
return dbc_dya, dbc_dyb, dbc_dp
def compute_jac_indices(n, m, k):
"""Compute indices for the collocation system Jacobian construction.
See `construct_global_jac` for the explanation.
"""
i_col = np.repeat(np.arange((m - 1) * n), n)
j_col = (np.tile(np.arange(n), n * (m - 1)) +
np.repeat(np.arange(m - 1) * n, n**2))
i_bc = np.repeat(np.arange((m - 1) * n, m * n + k), n)
j_bc = np.tile(np.arange(n), n + k)
i_p_col = np.repeat(np.arange((m - 1) * n), k)
j_p_col = np.tile(np.arange(m * n, m * n + k), (m - 1) * n)
i_p_bc = np.repeat(np.arange((m - 1) * n, m * n + k), k)
j_p_bc = np.tile(np.arange(m * n, m * n + k), n + k)
i = np.hstack((i_col, i_col, i_bc, i_bc, i_p_col, i_p_bc))
j = np.hstack((j_col, j_col + n,
j_bc, j_bc + (m - 1) * n,
j_p_col, j_p_bc))
return i, j
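# Illustrative sketch (not part of the original module): for the small case
# n = 2, m = 4, k = 1 used in the `construct_global_jac` docstring below,
# `compute_jac_indices` returns two equal-length index arrays that address
# every structurally nonzero entry of the 9 x 9 collocation Jacobian. This
# helper exists only as an example and is never called by the solver.
def _demo_jac_indices():
    i, j = compute_jac_indices(n=2, m=4, k=1)
    assert i.shape == j.shape
    return i, j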
def stacked_matmul(a, b):
"""Stacked matrix multiply: out[i,:,:] = np.dot(a[i,:,:], b[i,:,:]).
In our case a[i, :, :] and b[i, :, :] are always square.
"""
# Empirical optimization. Use outer Python loop and BLAS for large
# matrices, otherwise use a single einsum call.
if a.shape[1] > 50:
out = np.empty_like(a)
for i in range(a.shape[0]):
out[i] = np.dot(a[i], b[i])
return out
else:
return np.einsum('...ij,...jk->...ik', a, b)
def construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy, df_dy_middle, df_dp,
df_dp_middle, dbc_dya, dbc_dyb, dbc_dp):
"""Construct the Jacobian of the collocation system.
There are n * m + k functions: m - 1 collocation residuals, each
containing n components, followed by n + k boundary condition residuals.
There are n * m + k variables: m vectors of y, each containing n
components, followed by k values of vector p.
For example, let m = 4, n = 2 and k = 1, then the Jacobian will have
the following sparsity structure:
1 1 2 2 0 0 0 0  5
1 1 2 2 0 0 0 0  5
0 0 1 1 2 2 0 0  5
0 0 1 1 2 2 0 0  5
0 0 0 0 1 1 2 2  5
0 0 0 0 1 1 2 2  5

3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
3 3 0 0 0 0 4 4  6
Zeros denote identically zero values, other values denote different kinds
of blocks in the matrix (see below). The blank row indicates the separation
of collocation residuals from boundary conditions. And the blank column
indicates the separation of y values from p values.
Refer to [1]_ (p. 306) for the formula of n x n blocks for derivatives
of collocation residuals with respect to y.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
k : int
Number of the unknown parameters.
i_jac, j_jac : ndarray
Row and column indices returned by `compute_jac_indices`. They
represent different blocks in the Jacobian matrix in the following
order (see the scheme above):
* 1: m - 1 diagonal n x n blocks for the collocation residuals.
* 2: m - 1 off-diagonal n x n blocks for the collocation residuals.
* 3 : (n + k) x n block for the dependency of the boundary
conditions on ya.
* 4: (n + k) x n block for the dependency of the boundary
conditions on yb.
* 5: (m - 1) * n x k block for the dependency of the collocation
residuals on p.
* 6: (n + k) x k block for the dependency of the boundary
conditions on p.
df_dy : ndarray, shape (n, n, m)
Jacobian of f with respect to y computed at the mesh nodes.
df_dy_middle : ndarray, shape (n, n, m - 1)
Jacobian of f with respect to y computed at the middle between the
mesh nodes.
df_dp : ndarray with shape (n, k, m) or None
Jacobian of f with respect to p computed at the mesh nodes.
df_dp_middle: ndarray with shape (n, k, m - 1) or None
Jacobian of f with respect to p computed at the middle between the
mesh nodes.
dbc_dya, dbc_dyb : ndarray, shape (n, n)
Jacobian of bc with respect to ya and yb.
dbc_dp: ndarray with shape (n, k) or None
Jacobian of bc with respect to p.
Returns
-------
J : csc_matrix, shape (n * m + k, n * m + k)
Jacobian of the collocation system in a sparse form.
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
df_dy = np.transpose(df_dy, (2, 0, 1))
df_dy_middle = np.transpose(df_dy_middle, (2, 0, 1))
h = h[:, np.newaxis, np.newaxis]
dtype = df_dy.dtype
# Computing diagonal n x n blocks.
dPhi_dy_0 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_0[:] = -np.identity(n)
dPhi_dy_0 -= h / 6 * (df_dy[:-1] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[:-1])
dPhi_dy_0 -= h**2 / 12 * T
# Computing off-diagonal n x n blocks.
dPhi_dy_1 = np.empty((m - 1, n, n), dtype=dtype)
dPhi_dy_1[:] = np.identity(n)
dPhi_dy_1 -= h / 6 * (df_dy[1:] + 2 * df_dy_middle)
T = stacked_matmul(df_dy_middle, df_dy[1:])
dPhi_dy_1 += h**2 / 12 * T
values = np.hstack((dPhi_dy_0.ravel(), dPhi_dy_1.ravel(), dbc_dya.ravel(),
dbc_dyb.ravel()))
if k > 0:
df_dp = np.transpose(df_dp, (2, 0, 1))
df_dp_middle = np.transpose(df_dp_middle, (2, 0, 1))
T = stacked_matmul(df_dy_middle, df_dp[:-1] - df_dp[1:])
df_dp_middle += 0.125 * h * T
dPhi_dp = -h/6 * (df_dp[:-1] + df_dp[1:] + 4 * df_dp_middle)
values = np.hstack((values, dPhi_dp.ravel(), dbc_dp.ravel()))
J = coo_matrix((values, (i_jac, j_jac)))
return csc_matrix(J)
def collocation_fun(fun, y, p, x, h):
"""Evaluate collocation residuals.
This function lies at the core of the method. The solution is sought
as a cubic C1 continuous spline with derivatives matching the ODE rhs
at given nodes `x`. Collocation conditions are formed from the equality
of the spline derivatives and rhs of the ODE system in the middle points
between nodes.
Such a method is classified as belonging to the Lobatto IIIA family in the ODE literature.
Refer to [1]_ for the formula and some discussion.
Returns
-------
col_res : ndarray, shape (n, m - 1)
Collocation residuals at the middle points of the mesh intervals.
y_middle : ndarray, shape (n, m - 1)
Values of the cubic spline evaluated at the middle points of the mesh
intervals.
f : ndarray, shape (n, m)
RHS of the ODE system evaluated at the mesh nodes.
f_middle : ndarray, shape (n, m - 1)
RHS of the ODE system evaluated at the middle points of the mesh
intervals (and using `y_middle`).
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
f = fun(x, y, p)
y_middle = (0.5 * (y[:, 1:] + y[:, :-1]) -
0.125 * h * (f[:, 1:] - f[:, :-1]))
f_middle = fun(x[:-1] + 0.5 * h, y_middle, p)
col_res = y[:, 1:] - y[:, :-1] - h / 6 * (f[:, :-1] + f[:, 1:] +
4 * f_middle)
return col_res, y_middle, f, f_middle
def prepare_sys(n, m, k, fun, bc, fun_jac, bc_jac, x, h):
"""Create the function and the Jacobian for the collocation system."""
x_middle = x[:-1] + 0.5 * h
i_jac, j_jac = compute_jac_indices(n, m, k)
def col_fun(y, p):
return collocation_fun(fun, y, p, x, h)
def sys_jac(y, p, y_middle, f, f_middle, bc0):
if fun_jac is None:
df_dy, df_dp = estimate_fun_jac(fun, x, y, p, f)
df_dy_middle, df_dp_middle = estimate_fun_jac(
fun, x_middle, y_middle, p, f_middle)
else:
df_dy, df_dp = fun_jac(x, y, p)
df_dy_middle, df_dp_middle = fun_jac(x_middle, y_middle, p)
if bc_jac is None:
dbc_dya, dbc_dyb, dbc_dp = estimate_bc_jac(bc, y[:, 0], y[:, -1],
p, bc0)
else:
dbc_dya, dbc_dyb, dbc_dp = bc_jac(y[:, 0], y[:, -1], p)
return construct_global_jac(n, m, k, i_jac, j_jac, h, df_dy,
df_dy_middle, df_dp, df_dp_middle, dbc_dya,
dbc_dyb, dbc_dp)
return col_fun, sys_jac
def solve_newton(n, m, h, col_fun, bc, jac, y, p, B, bvp_tol):
"""Solve the nonlinear collocation system by a Newton method.
This is a simple Newton method with a backtracking line search. As
advised in [1]_, an affine-invariant criterion function F = ||J^-1 r||^2
is used, where J is the Jacobian matrix at the current iteration and r is
the vector of collocation residuals (values of the system lhs).
The method alternates between full Newton iterations and fixed-Jacobian
iterations, based on the progress made by the previous step.
There are other tricks proposed in [1]_, but they are not used as they
don't seem to improve anything significantly, and even break the
convergence on some test problems I tried.
All important parameters of the algorithm are defined inside the function.
Parameters
----------
n : int
Number of equations in the ODE system.
m : int
Number of nodes in the mesh.
h : ndarray, shape (m-1,)
Mesh intervals.
col_fun : callable
Function computing collocation residuals.
bc : callable
Function computing boundary condition residuals.
jac : callable
Function computing the Jacobian of the whole system (including
collocation and boundary condition residuals). It is supposed to
return csc_matrix.
y : ndarray, shape (n, m)
Initial guess for the function values at the mesh nodes.
p : ndarray, shape (k,)
Initial guess for the unknown parameters.
B : ndarray with shape (n, n) or None
Matrix to force the S y(a) = 0 condition for problems with a
singular term. If None, the singular term is assumed to be absent.
bvp_tol : float
Tolerance to which we want to solve a BVP.
Returns
-------
y : ndarray, shape (n, m)
Final iterate for the function values at the mesh nodes.
p : ndarray, shape (k,)
Final iterate for the unknown parameters.
singular : bool
True if the LU decomposition failed because the Jacobian turned out
to be singular.
References
----------
.. [1] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations"
"""
# We know that the solution residuals at the middle points of the mesh
# are connected with collocation residuals r_middle = 1.5 * col_res / h.
# As our BVP solver tries to decrease relative residuals below a certain
# tolerance, it seems reasonable to terminate Newton iterations by
# comparison of r_middle / (1 + np.abs(f_middle)) with a certain threshold,
# which we choose to be 1.5 orders lower than the BVP tolerance. We rewrite
# the condition as col_res < tol_r * (1 + np.abs(f_middle)), then tol_r
# should be computed as follows:
tol_r = 2/3 * h * 5e-2 * bvp_tol
# We also need to control residuals of the boundary conditions. But it
# seems that they become very small eventually as the solver progresses,
# i.e. the tolerance for the BC residuals is not very important. We set it 1.5 orders
# lower than the BVP tolerance as well.
tol_bc = 5e-2 * bvp_tol
# Maximum allowed number of Jacobian evaluations and factorizations, in
# other words the maximum number of full Newton iterations. A small value
# is recommended in the literature.
max_njev = 4
# Maximum number of iterations, considering that some of them can be
# performed with the fixed Jacobian. In theory such iterations are cheap,
# but it's not that simple in Python.
max_iter = 8
# Minimum relative improvement of the criterion function to accept the
# step (Armijo constant).
sigma = 0.2
# Step size decrease factor for backtracking.
tau = 0.5
# Maximum number of backtracking steps, the minimum step is then
# tau ** n_trial.
n_trial = 4
col_res, y_middle, f, f_middle = col_fun(y, p)
bc_res = bc(y[:, 0], y[:, -1], p)
res = np.hstack((col_res.ravel(order='F'), bc_res))
njev = 0
singular = False
recompute_jac = True
for iteration in range(max_iter):
if recompute_jac:
J = jac(y, p, y_middle, f, f_middle, bc_res)
njev += 1
try:
LU = splu(J)
except RuntimeError:
singular = True
break
step = LU.solve(res)
cost = np.dot(step, step)
y_step = step[:m * n].reshape((n, m), order='F')
p_step = step[m * n:]
alpha = 1
for trial in range(n_trial + 1):
y_new = y - alpha * y_step
if B is not None:
y_new[:, 0] = np.dot(B, y_new[:, 0])
p_new = p - alpha * p_step
col_res, y_middle, f, f_middle = col_fun(y_new, p_new)
bc_res = bc(y_new[:, 0], y_new[:, -1], p_new)
res = np.hstack((col_res.ravel(order='F'), bc_res))
step_new = LU.solve(res)
cost_new = np.dot(step_new, step_new)
if cost_new < (1 - 2 * alpha * sigma) * cost:
break
if trial < n_trial:
alpha *= tau
y = y_new
p = p_new
if njev == max_njev:
break
if (np.all(np.abs(col_res) < tol_r * (1 + np.abs(f_middle))) and
np.all(np.abs(bc_res) < tol_bc)):
break
# If the full step was taken, then we are going to continue with
# the same Jacobian. This is the approach of BVP_SOLVER.
if alpha == 1:
step = step_new
cost = cost_new
recompute_jac = False
else:
recompute_jac = True
return y, p, singular
def print_iteration_header():
print("{:^15}{:^15}{:^15}{:^15}".format(
"Iteration", "Max residual", "Total nodes", "Nodes added"))
def print_iteration_progress(iteration, residual, total_nodes, nodes_added):
print("{:^15}{:^15.2e}{:^15}{:^15}".format(
iteration, residual, total_nodes, nodes_added))
class BVPResult(OptimizeResult):
pass
TERMINATION_MESSAGES = {
0: "The algorithm converged to the desired accuracy.",
1: "The maximum number of mesh nodes is exceeded.",
2: "A singular Jacobian encountered when solving the collocation system."
}
def estimate_rms_residuals(fun, sol, x, h, p, r_middle, f_middle):
"""Estimate rms values of collocation residuals using Lobatto quadrature.
The residuals are defined as the difference between the derivatives of
our solution and rhs of the ODE system. We use relative residuals, i.e.
normalized by 1 + np.abs(f). RMS values are computed as the square root of the
normalized integrals of the squared relative residuals over each interval.
Integrals are estimated using a 5-point Lobatto quadrature [1]_; we use the
fact that residuals at the mesh nodes are identically zero.
In [2]_ the integrals are not normalized by interval lengths, which gives
a rate of convergence of the residuals that is higher by a factor of h**0.5.
I chose to do such normalization for ease of interpretation of the return
values as RMS estimates.
Returns
-------
rms_res : ndarray, shape (m - 1,)
Estimated rms values of the relative residuals over each interval.
References
----------
.. [1] http://mathworld.wolfram.com/LobattoQuadrature.html
.. [2] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
"""
x_middle = x[:-1] + 0.5 * h
s = 0.5 * h * (3/7)**0.5
x1 = x_middle + s
x2 = x_middle - s
y1 = sol(x1)
y2 = sol(x2)
y1_prime = sol(x1, 1)
y2_prime = sol(x2, 1)
f1 = fun(x1, y1, p)
f2 = fun(x2, y2, p)
r1 = y1_prime - f1
r2 = y2_prime - f2
r_middle /= 1 + np.abs(f_middle)
r1 /= 1 + np.abs(f1)
r2 /= 1 + np.abs(f2)
r1 = np.sum(np.real(r1 * np.conj(r1)), axis=0)
r2 = np.sum(np.real(r2 * np.conj(r2)), axis=0)
r_middle = np.sum(np.real(r_middle * np.conj(r_middle)), axis=0)
return (0.5 * (32 / 45 * r_middle + 49 / 90 * (r1 + r2))) ** 0.5
def create_spline(y, yp, x, h):
"""Create a cubic spline given values and derivatives.
Formulas for the coefficients are taken from interpolate.CubicSpline.
Returns
-------
sol : PPoly
Constructed spline as a PPoly instance.
"""
from scipy.interpolate import PPoly
n, m = y.shape
c = np.empty((4, n, m - 1), dtype=y.dtype)
slope = (y[:, 1:] - y[:, :-1]) / h
t = (yp[:, :-1] + yp[:, 1:] - 2 * slope) / h
c[0] = t / h
c[1] = (slope - yp[:, :-1]) / h - t
c[2] = yp[:, :-1]
c[3] = y[:, :-1]
c = np.rollaxis(c, 1)
return PPoly(c, x, extrapolate=True, axis=1)
def modify_mesh(x, insert_1, insert_2):
"""Insert nodes into a mesh.
Node removal logic is not implemented; its impact on the solver is
presumably negligible, so only insertion is done in this function.
Parameters
----------
x : ndarray, shape (m,)
Mesh nodes.
insert_1 : ndarray
Intervals into each of which one new node is inserted at the midpoint.
insert_2 : ndarray
Intervals into each of which two new nodes are inserted, dividing the
interval into 3 equal parts.
Returns
-------
x_new : ndarray
New mesh nodes.
Notes
-----
`insert_1` and `insert_2` should not have common values.
"""
# Because the np.insert implementation apparently varies across numpy
# versions, we use a simple and reliable approach with sorting.
return np.sort(np.hstack((
x,
0.5 * (x[insert_1] + x[insert_1 + 1]),
(2 * x[insert_2] + x[insert_2 + 1]) / 3,
(x[insert_2] + 2 * x[insert_2 + 1]) / 3
)))
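# Illustrative sketch (not part of the original module): a tiny demonstration
# of `modify_mesh`. One node is inserted in the middle of interval 0 and two
# nodes split interval 2 into three equal parts. This helper exists only as an
# example and is never called by the solver.
def _demo_modify_mesh():
    x = np.array([0.0, 1.0, 2.0, 3.0])
    # Result is approximately [0, 0.5, 1, 2, 2.333, 2.667, 3].
    return modify_mesh(x, insert_1=np.array([0]), insert_2=np.array([2]))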
def wrap_functions(fun, bc, fun_jac, bc_jac, k, a, S, D, dtype):
"""Wrap functions for unified usage in the solver."""
if fun_jac is None:
fun_jac_wrapped = None
if bc_jac is None:
bc_jac_wrapped = None
if k == 0:
def fun_p(x, y, _):
return np.asarray(fun(x, y), dtype)
def bc_wrapped(ya, yb, _):
return np.asarray(bc(ya, yb), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, _):
return np.asarray(fun_jac(x, y), dtype), None
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, _):
dbc_dya, dbc_dyb = bc_jac(ya, yb)
return (np.asarray(dbc_dya, dtype),
np.asarray(dbc_dyb, dtype), None)
else:
def fun_p(x, y, p):
return np.asarray(fun(x, y, p), dtype)
def bc_wrapped(ya, yb, p):
return np.asarray(bc(ya, yb, p), dtype)
if fun_jac is not None:
def fun_jac_p(x, y, p):
df_dy, df_dp = fun_jac(x, y, p)
return np.asarray(df_dy, dtype), np.asarray(df_dp, dtype)
if bc_jac is not None:
def bc_jac_wrapped(ya, yb, p):
dbc_dya, dbc_dyb, dbc_dp = bc_jac(ya, yb, p)
return (np.asarray(dbc_dya, dtype), np.asarray(dbc_dyb, dtype),
np.asarray(dbc_dp, dtype))
if S is None:
fun_wrapped = fun_p
else:
def fun_wrapped(x, y, p):
f = fun_p(x, y, p)
if x[0] == a:
f[:, 0] = np.dot(D, f[:, 0])
f[:, 1:] += np.dot(S, y[:, 1:]) / (x[1:] - a)
else:
f += np.dot(S, y) / (x - a)
return f
if fun_jac is not None:
if S is None:
fun_jac_wrapped = fun_jac_p
else:
Sr = S[:, :, np.newaxis]
def fun_jac_wrapped(x, y, p):
df_dy, df_dp = fun_jac_p(x, y, p)
if x[0] == a:
df_dy[:, :, 0] = np.dot(D, df_dy[:, :, 0])
df_dy[:, :, 1:] += Sr / (x[1:] - a)
else:
df_dy += Sr / (x - a)
return df_dy, df_dp
return fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped
def solve_bvp(fun, bc, x, y, p=None, S=None, fun_jac=None, bc_jac=None,
tol=1e-3, max_nodes=1000, verbose=0):
"""Solve a boundary-value problem for a system of ODEs.
This function numerically solves a first order system of ODEs subject to
two-point boundary conditions::
dy / dx = f(x, y, p) + S * y / (x - a), a <= x <= b
bc(y(a), y(b), p) = 0
Here x is a 1-dimensional independent variable, y(x) is an n-dimensional
vector-valued function and p is a k-dimensional vector of unknown
parameters which is to be found along with y(x). For the problem to be
determined there must be n + k boundary conditions, i.e. bc must be an
(n + k)-dimensional function.
The last singular term in the right-hand side of the system is optional.
It is defined by an n-by-n matrix S, such that the solution must satisfy
S y(a) = 0. This condition will be forced during iterations, so it must not
contradict the boundary conditions. See [2]_ for an explanation of how this term
is handled when solving BVPs numerically.
Problems in a complex domain can be solved as well. In this case y and p
are considered to be complex, and f and bc are assumed to be complex-valued
functions, but x stays real. Note that f and bc must be complex
differentiable (satisfy Cauchy-Riemann equations [4]_), otherwise you
should rewrite your problem for real and imaginary parts separately. To
solve a problem in a complex domain, pass an initial guess for y with a
complex data type (see below).
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(x, y)``,
or ``fun(x, y, p)`` if parameters are present. All arguments are
ndarray: ``x`` with shape (m,), ``y`` with shape (n, m), meaning that
``y[:, i]`` corresponds to ``x[i]``, and ``p`` with shape (k,). The
return value must be an array with shape (n, m) and with the same
layout as ``y``.
bc : callable
Function evaluating residuals of the boundary conditions. The calling
signature is ``bc(ya, yb)``, or ``bc(ya, yb, p)`` if parameters are
present. All arguments are ndarray: ``ya`` and ``yb`` with shape (n,),
and ``p`` with shape (k,). The return value must be an array with
shape (n + k,).
x : array_like, shape (m,)
Initial mesh. Must be a strictly increasing sequence of real numbers
with ``x[0]=a`` and ``x[-1]=b``.
y : array_like, shape (n, m)
Initial guess for the function values at the mesh nodes, i-th column
corresponds to ``x[i]``. For problems in a complex domain pass `y`
with a complex data type (even if the initial guess is purely real).
p : array_like with shape (k,) or None, optional
Initial guess for the unknown parameters. If None (default), it is
assumed that the problem doesn't depend on any parameters.
S : array_like with shape (n, n) or None
Matrix defining the singular term. If None (default), the problem is
solved without the singular term.
fun_jac : callable or None, optional
Function computing derivatives of f with respect to y and p. The
calling signature is ``fun_jac(x, y)``, or ``fun_jac(x, y, p)`` if
parameters are present. The return must contain 1 or 2 elements in the
following order:
* df_dy : array_like with shape (n, n, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d (y_q)_j.
* df_dp : array_like with shape (n, k, m) where an element
(i, j, q) equals to d f_i(x_q, y_q, p) / d p_j.
Here q numbers nodes at which x and y are defined, whereas i and j
number vector components. If the problem is solved without unknown
parameters df_dp should not be returned.
If `fun_jac` is None (default), the derivatives will be estimated
by the forward finite differences.
bc_jac : callable or None, optional
Function computing derivatives of bc with respect to ya, yb and p.
The calling signature is ``bc_jac(ya, yb)``, or ``bc_jac(ya, yb, p)``
if parameters are present. The return must contain 2 or 3 elements in
the following order:
* dbc_dya : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d ya_j.
* dbc_dyb : array_like with shape (n, n) where an element (i, j)
equals to d bc_i(ya, yb, p) / d yb_j.
* dbc_dp : array_like with shape (n, k) where an element (i, j)
equals to d bc_i(ya, yb, p) / d p_j.
If the problem is solved without unknown parameters dbc_dp should not
be returned.
If `bc_jac` is None (default), the derivatives will be estimated by
the forward finite differences.
tol : float, optional
Desired tolerance of the solution. If we define ``r = y' - f(x, y)``
where y is the found solution, then the solver tries to achieve on each
mesh interval ``norm(r / (1 + abs(f))) < tol``, where ``norm`` is
estimated in a root mean squared sense (using a numerical quadrature
formula). Default is 1e-3.
max_nodes : int, optional
Maximum allowed number of the mesh nodes. If exceeded, the algorithm
terminates. Default is 1000.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations.
Returns
-------
Bunch object with the following fields defined:
sol : PPoly
Found solution for y as `scipy.interpolate.PPoly` instance, a C1
continuous cubic spline.
p : ndarray or None, shape (k,)
Found parameters. None, if the parameters were not present in the
problem.
x : ndarray, shape (m,)
Nodes of the final mesh.
y : ndarray, shape (n, m)
Solution values at the mesh nodes.
yp : ndarray, shape (n, m)
Solution derivatives at the mesh nodes.
rms_residuals : ndarray, shape (m - 1,)
RMS values of the relative residuals over each mesh interval (see the
description of `tol` parameter).
niter : int
Number of completed iterations.
status : int
Reason for algorithm termination:
* 0: The algorithm converged to the desired accuracy.
* 1: The maximum number of mesh nodes is exceeded.
* 2: A singular Jacobian encountered when solving the collocation
system.
message : string
Verbal description of the termination reason.
success : bool
True if the algorithm converged to the desired accuracy (``status=0``).
Notes
-----
This function implements a 4-th order collocation algorithm with the
control of residuals similar to [1]_. A collocation system is solved
by a damped Newton method with an affine-invariant criterion function as
described in [3]_.
Note that in [1]_ integral residuals are defined without normalization
by interval lengths. So their definition is different by a multiplier of
h**0.5 (h is an interval length) from the definition used here.
.. versionadded:: 0.18.0
References
----------
.. [1] J. Kierzenka, L. F. Shampine, "A BVP Solver Based on Residual
Control and the MATLAB PSE", ACM Trans. Math. Softw., Vol. 27,
Number 3, pp. 299-316, 2001.
.. [2] L.F. Shampine, P. H. Muir and H. Xu, "A User-Friendly Fortran BVP
Solver".
.. [3] U. Ascher, R. Mattheij and R. Russell "Numerical Solution of
Boundary Value Problems for Ordinary Differential Equations".
.. [4] `Cauchy-Riemann equations
<https://en.wikipedia.org/wiki/Cauchy-Riemann_equations>`_ on
Wikipedia.
Examples
--------
In the first example we solve Bratu's problem::
y'' + k * exp(y) = 0
y(0) = y(1) = 0
for k = 1.
We rewrite the equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -exp(y1)
>>> def fun(x, y):
... return np.vstack((y[1], -np.exp(y[0])))
Implement evaluation of the boundary condition residuals:
>>> def bc(ya, yb):
... return np.array([ya[0], yb[0]])
Define the initial mesh with 5 nodes:
>>> x = np.linspace(0, 1, 5)
This problem is known to have two solutions. To obtain both of them we
use two different initial guesses for y. We denote them by subscripts
a and b.
>>> y_a = np.zeros((2, x.size))
>>> y_b = np.zeros((2, x.size))
>>> y_b[0] = 3
Now we are ready to run the solver.
>>> from scipy.integrate import solve_bvp
>>> res_a = solve_bvp(fun, bc, x, y_a)
>>> res_b = solve_bvp(fun, bc, x, y_b)
Let's plot the two solutions found. We take advantage of having the
solution in spline form to produce a smooth plot.
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot_a = res_a.sol(x_plot)[0]
>>> y_plot_b = res_b.sol(x_plot)[0]
>>> import matplotlib.pyplot as plt
>>> plt.plot(x_plot, y_plot_a, label='y_a')
>>> plt.plot(x_plot, y_plot_b, label='y_b')
>>> plt.legend()
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
We see that the two solutions have similar shape, but differ in scale
significantly.
In the second example we solve a simple Sturm-Liouville problem::
y'' + k**2 * y = 0
y(0) = y(1) = 0
It is known that a non-trivial solution y = A * sin(k * x) is possible for
k = pi * n, where n is an integer. To establish the normalization constant
A = 1 we add a boundary condition::
y'(0) = k
Again we rewrite our equation as a first order system and implement its
right-hand side evaluation::
y1' = y2
y2' = -k**2 * y1
>>> def fun(x, y, p):
... k = p[0]
... return np.vstack((y[1], -k**2 * y[0]))
Note that parameters p are passed as a vector (with one element in our
case).
Implement the boundary conditions:
>>> def bc(ya, yb, p):
... k = p[0]
... return np.array([ya[0], yb[0], ya[1] - k])
Set up the initial mesh and the guess for y. We aim to find the solution for
k = 2 * pi; to achieve that we set the values of y to approximately follow
sin(2 * pi * x):
>>> x = np.linspace(0, 1, 5)
>>> y = np.zeros((2, x.size))
>>> y[0, 1] = 1
>>> y[0, 3] = -1
Run the solver with 6 as an initial guess for k.
>>> sol = solve_bvp(fun, bc, x, y, p=[6])
We see that the found k is approximately correct:
>>> sol.p[0]
6.28329460046
And finally plot the solution to see the anticipated sinusoid:
>>> x_plot = np.linspace(0, 1, 100)
>>> y_plot = sol.sol(x_plot)[0]
>>> plt.plot(x_plot, y_plot)
>>> plt.xlabel("x")
>>> plt.ylabel("y")
>>> plt.show()
"""
x = np.asarray(x, dtype=float)
if x.ndim != 1:
raise ValueError("`x` must be 1 dimensional.")
h = np.diff(x)
if np.any(h <= 0):
raise ValueError("`x` must be strictly increasing.")
a = x[0]
y = np.asarray(y)
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
if y.ndim != 2:
raise ValueError("`y` must be 2 dimensional.")
if y.shape[1] != x.shape[0]:
raise ValueError("`y` is expected to have {} columns, but actually "
"has {}.".format(x.shape[0], y.shape[1]))
if p is None:
p = np.array([])
else:
p = np.asarray(p, dtype=dtype)
if p.ndim != 1:
raise ValueError("`p` must be 1 dimensional.")
if tol < 100 * EPS:
warn("`tol` is too low, setting to {:.2e}".format(100 * EPS))
tol = 100 * EPS
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
n = y.shape[0]
k = p.shape[0]
if S is not None:
S = np.asarray(S, dtype=dtype)
if S.shape != (n, n):
raise ValueError("`S` is expected to have shape {}, "
"but actually has {}".format((n, n), S.shape))
# Compute I - S^+ S to impose necessary boundary conditions.
B = np.identity(n) - np.dot(pinv(S), S)
y[:, 0] = np.dot(B, y[:, 0])
# Compute (I - S)^+ to correct derivatives at x=a.
D = pinv(np.identity(n) - S)
else:
B = None
D = None
fun_wrapped, bc_wrapped, fun_jac_wrapped, bc_jac_wrapped = wrap_functions(
fun, bc, fun_jac, bc_jac, k, a, S, D, dtype)
f = fun_wrapped(x, y, p)
if f.shape != y.shape:
raise ValueError("`fun` return is expected to have shape {}, "
"but actually has {}.".format(y.shape, f.shape))
bc_res = bc_wrapped(y[:, 0], y[:, -1], p)
if bc_res.shape != (n + k,):
raise ValueError("`bc` return is expected to have shape {}, "
"but actually has {}.".format((n + k,), bc_res.shape))
status = 0
iteration = 0
if verbose == 2:
print_iteration_header()
while True:
m = x.shape[0]
col_fun, jac_sys = prepare_sys(n, m, k, fun_wrapped, bc_wrapped,
fun_jac_wrapped, bc_jac_wrapped, x, h)
y, p, singular = solve_newton(n, m, h, col_fun, bc_wrapped, jac_sys,
y, p, B, tol)
iteration += 1
col_res, y_middle, f, f_middle = collocation_fun(fun_wrapped, y,
p, x, h)
# This relation is not trivial, but can be verified.
r_middle = 1.5 * col_res / h
sol = create_spline(y, f, x, h)
rms_res = estimate_rms_residuals(fun_wrapped, sol, x, h, p,
r_middle, f_middle)
max_rms_res = np.max(rms_res)
if singular:
status = 2
break
insert_1, = np.nonzero((rms_res > tol) & (rms_res < 100 * tol))
insert_2, = np.nonzero(rms_res >= 100 * tol)
nodes_added = insert_1.shape[0] + 2 * insert_2.shape[0]
if m + nodes_added > max_nodes:
status = 1
if verbose == 2:
nodes_added = "({})".format(nodes_added)
print_iteration_progress(iteration, max_rms_res, m,
nodes_added)
break
if verbose == 2:
print_iteration_progress(iteration, max_rms_res, m, nodes_added)
if nodes_added > 0:
x = modify_mesh(x, insert_1, insert_2)
h = np.diff(x)
y = sol(x)
else:
status = 0
break
if verbose > 0:
if status == 0:
print("Solved in {} iterations, number of nodes {}, "
"maximum relative residual {:.2e}."
.format(iteration, x.shape[0], max_rms_res))
elif status == 1:
print("Number of nodes is exceeded after iteration {}, "
"maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
elif status == 2:
print("Singular Jacobian encountered when solving the collocation "
"system on iteration {}, maximum relative residual {:.2e}."
.format(iteration, max_rms_res))
if p.size == 0:
p = None
return BVPResult(sol=sol, p=p, x=x, y=y, yp=f, rms_residuals=rms_res,
niter=iteration, status=status,
message=TERMINATION_MESSAGES[status], success=status == 0)
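# Illustrative sketch (not part of the original module): a minimal end-to-end
# run of `solve_bvp` on Bratu's problem from the docstring above. It executes
# only when this file is run directly as a script.
if __name__ == "__main__":
    def _bratu_rhs(x, y):
        return np.vstack((y[1], -np.exp(y[0])))
    def _bratu_bc(ya, yb):
        return np.array([ya[0], yb[0]])
    x_demo = np.linspace(0, 1, 5)
    y_demo = np.zeros((2, x_demo.size))
    res_demo = solve_bvp(_bratu_rhs, _bratu_bc, x_demo, y_demo)
    print("status: {}, y(0.5) on the lower branch: {:.4f}".format(
        res_demo.status, res_demo.sol(0.5)[0]))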
| bsd-3-clause |
mrshu/scikit-learn | examples/applications/svm_gui.py | 2 | 11093 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD Style.
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.clf = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print "fit the model"
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print "Accuracy:", clf.score(X, y) * 100
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
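# Illustrative sketch (not part of the original example): the decision-surface
# computation used by Controller.decision_surface, shown standalone without the
# Tk GUI. The data below is synthetic, and this helper is never called by the
# GUI itself.
def _decision_surface_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2) * 20
    labels = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = svm.SVC(kernel='rbf', gamma=0.01).fit(X, labels)
    xx, yy = np.meshgrid(np.arange(x_min, x_max + 1, 1),
                         np.arange(y_min, y_max + 1, 1))
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    return xx, yy, Z.reshape(xx.shape)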
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
Achuth17/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimators using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimators for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
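# Illustrative addition (not part of the original example): print the iteration
# counts selected by the three criteria computed above.
print("Best iteration by OOB:  {}".format(oob_best_iter))
print("Best iteration by CV:   {}".format(cv_best_iter))
print("Best iteration by test: {}".format(test_best_iter))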
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/feature_extraction/dict_vectorizer.py | 3 | 10182 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator : string, optional
Separator string used when constructing new features for one-hot
coding.
sparse : boolean, optional
Whether transform should produce scipy.sparse matrices.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
# collect all the possible feature names
feature_names = set()
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
feature_names.add(f)
# sort the feature names to define the mapping
feature_names = sorted(feature_names)
self.vocabulary_ = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
return self
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X).
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
Notes
-----
Because this method requires two passes over X, it materializes X in
memory.
"""
X = _tosequence(X)
self.fit(X)
return self.transform(X)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
vocab = self.vocabulary_
if self.sparse:
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
indices.append(vocab[f])
values.append(dtype(v))
except KeyError:
pass
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
if len(indices) > 0:
# workaround for bug in older NumPy:
# http://projects.scipy.org/numpy/ticket/1943
indices = np.frombuffer(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
return sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
else:
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
duststorm/dronekit-python | examples/perf/perf_test.py | 5 | 9084 | #
# This is a small example of the python drone API
# Usage:
# * mavproxy.py
# * module load api
# * api start small-demo.py
#
from droneapi.lib import VehicleMode
from pymavlink import mavutil
import time
from datetime import datetime
import traceback
from matplotlib import pyplot
from numpy import arange
import bisect
findings = """
Reception latency:
* due to a top level select() on the udp port, the latency for calling process_master seems to be sub 1ms (limit of python timer resolution)
Reception periodicity:
TBD
Background processing periodicity:
TBD
Sending periodicity:
TBD
Computation efficiency:
FIXME
Max closed loop rate:
 * 20ms+/-300us when talking to SITL (every time we receive a cmd_ack we immediately send a pair of ROI related msgs)
 * The less than 300us variability makes me think SITL has some 20ms poll rate - need to try with a real vehicle
SITL copter load
Interval (sec) 0.019865
MaxInterval (sec) 0.021927
MinInterval (sec) 0.018421
AVR plane load: 20ms+/-7ms
Interval 0.02061
MaxInterval 0.025496
MinInterval 0.011533
PX4 quad load on Edison: 20ms +60ms -5ms (VERY HIGH VARIABILITY - mostly due to px4 side - see below)
Interval 0.0281970000001
MaxInterval 0.0786720000001
MinInterval 0.0161290000001
PX4 quad load on a pixhawk (a9defa35) talking to my desktop - similar variability as with an Edison:
Interval 0.01989
MaxInterval 0.0688479999999
MinInterval 0.00722900000005
Interval 0.019929
MaxInterval 0.0688479999999
MinInterval 0.00722900000005
Interval 0.0189700000001
MaxInterval 0.0688479999999
MinInterval 0.00722900000005
or here are ~20 of the interval values seen on the px4 (a9defa35) test
Interval 0.020012
Interval 0.0199689999999
Interval 0.0229640000002
Interval 0.0171049999999
Interval 0.0198150000001
Interval 0.0211049999998
Interval 0.0199740000003
Interval 0.0199459999999
Interval 0.0199590000002
Interval 0.0200379999997
Interval 0.0200850000001
Interval 0.0198839999998
Interval 0.0200420000001
Interval 0.0199539999999
Interval 0.0200760000002
Interval 0.0199029999999
Interval 0.0200950000003
Interval 0.0517199999999
now testing with a plane load with a px4 (a9defa35) at 56kbps - highly variable 25 to 82ms
Interval 0.0589850000001
MaxInterval 0.0829760000001
MinInterval 0.0258819999999
but change to 115kbps and things are much better
Interval 0.0201160000001
MaxInterval 0.044656
MinInterval 0.0150279999998
and changing things to 500kbps everything is just peachy - 18ms
Interval 0.018119
MaxInterval 0.02527
MinInterval 0.015737
Recommendations:
Run link as fast as you can 1500kbps?
Turn on hw flow control (and use --rtscts on mavproxy)
mavproxy.py --master=/dev/ttyMFD1,115200 --cmd="api start perf_test.py"
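With hardware flow control enabled the launch would look roughly like this
(hypothetical baud rate - adjust for your link):
mavproxy.py --master=/dev/ttyMFD1,921600 --rtscts --cmd="api start perf_test.py"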
"""
global v
def scatterplot(x,y):
pyplot.plot(x,y,'b.')
pyplot.xlim(min(x)-1,max(x)+1)
pyplot.ylim(min(y)-1,max(y)+1)
pyplot.show()
def cur_usec():
"""Return current time in usecs"""
# t = time.time()
dt = datetime.now()
t = dt.minute * 60 + dt.second + dt.microsecond / (1e6)
return t
class MeasureTime(object):
def __init__(self):
self.prevtime = cur_usec()
self.previnterval = 0
self.numcount = 0
self.reset()
def reset(self):
self.maxinterval = 0
self.mininterval = 10000
def update(self):
now = cur_usec()
self.numcount = self.numcount + 1
self.previnterval = now - self.prevtime
self.prevtime = now
self.maxinterval = max(self.previnterval, self.maxinterval)
self.mininterval = min(self.mininterval, self.previnterval)
#print "Interval", self.previnterval
if (self.numcount % 100) == 0:
if self.numcount == 200:
# Ignore delays during startup
self.reset()
print "Interval", self.previnterval
print "MaxInterval", self.maxinterval
print "MinInterval", self.mininterval
acktime = MeasureTime()
def mavrx_debug_handler(message):
"""Measure heartbeat periodically"""
mtype = message.get_type()
global sendtime
#if mtype == 'HEARTBEAT':
if mtype == 'COMMAND_ACK':
#traceback.print_stack()
#print "GOT ACK", message
acktime.update()
send_testpackets()
def send_testpackets():
#print "send ROI cmds"
# create the SET_POSITION_TARGET_GLOBAL_INT command
msg = v.message_factory.set_position_target_global_int_encode(
0, # time_boot_ms (not used)
1, 1, # target system, target component
mavutil.mavlink.MAV_FRAME_GLOBAL, # frame
0b1000000011000111, # type_mask - enable velocity only
0, 0, 0, # x, y, z positions (not used)
0, 0, 0.0, # x, y, z velocity in m/s
0, 0, 0, # x, y, z acceleration (not used)
0, 0) # yaw, yaw_rate (not used)
# send command to vehicle
v.send_mavlink(msg)
# set ROI
msg = v.message_factory.command_long_encode(
1, 1, # target system, target component
#mavutil.mavlink.MAV_CMD_DO_SET_RELAY, #command
mavutil.mavlink.MAV_CMD_DO_SET_ROI, #command
0, #confirmation
0, 0, 0, 0, #params 1-4
0,
0,
0
)
v.send_mavlink(msg)
# Always call flush to guarantee that previous writes to the vehicle have taken place
v.flush()
# First get an instance of the API endpoint
api = local_connect()
# get our vehicle - when running with mavproxy it only knows about one vehicle (for now)
v = api.get_vehicles()[0]
# Print out some interesting stats about the vehicle
print "Mode: %s" % v.mode
print "Location: %s" % v.location
print "Attitude: %s" % v.attitude
print "Velocity: %s" % v.velocity
print "GPS: %s" % v.gps_0
print "Armed: %s" % v.armed
print "groundspeed: %s" % v.groundspeed
print "airspeed: %s" % v.airspeed
import time
time.sleep(30)
# Use of the following method is not recommended (it is better to add observer callbacks to attributes) but if you need it
# it is available...
v.set_mavlink_callback(mavrx_debug_handler)
# You can read and write parameters
#print "Param: %s" % v.parameters['THR_MAX']
# Now download the vehicle waypoints
cmds = v.commands
cmds.download()
cmds.wait_valid()
print "Home WP: %s" % cmds[0]
print "Current dest: %s" % cmds.next
# Test custom commands
# Note: For mavlink messages that include a target_system & target_component, those values
# can just be filled with zero. The API will take care of using the correct values
# For instance, from the xml for command_long:
# Send a command with up to seven parameters to the MAV
#
# target_system : System which should execute the command (uint8_t)
# target_component : Component which should execute the command, 0 for all components (uint8_t)
# command : Command ID, as defined by MAV_CMD enum. (uint16_t)
# confirmation : 0: First transmission of this command. 1-255: Confirmation transmissions (e.g. for kill command) (uint8_t)
# param1 : Parameter 1, as defined by MAV_CMD enum. (float)
# param2 : Parameter 2, as defined by MAV_CMD enum. (float)
# param3 : Parameter 3, as defined by MAV_CMD enum. (float)
# param4 : Parameter 4, as defined by MAV_CMD enum. (float)
# param5 : Parameter 5, as defined by MAV_CMD enum. (float)
# param6 : Parameter 6, as defined by MAV_CMD enum. (float)
# param7 : Parameter 7, as defined by MAV_CMD enum. (float)
#msg = v.message_factory.command_long_encode(0, 0,
# mavutil.mavlink.MAV_CMD_CONDITION_YAW, 0,
# 0, 0, 0, 0, 1, 0, 0)
#print "Created msg: %s" % msg
#v.send_mavlink(msg)
print "Disarming..."
v.armed = False
v.flush()
# send_testpackets()
| apache-2.0 |
bloyl/mne-python | mne/viz/ica.py | 2 | 41059 | """Functions to plot ICA specific data (besides topographies)."""
# Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Teon Brooks <[email protected]>
# Daniel McCloy <[email protected]>
#
# License: Simplified BSD
from functools import partial
import warnings
import numpy as np
from .utils import (tight_layout, _make_event_color_dict,
plt_show, _convert_psds, _compute_scalings)
from .topomap import _plot_ica_topomap
from .epochs import plot_epochs_image
from .evoked import _butterfly_on_button_press, _butterfly_onpick
from ..utils import _validate_type, fill_doc
from ..defaults import _handle_default
from ..io.meas_info import create_info
from ..io.pick import pick_types, _picks_to_idx
from ..time_frequency.psd import psd_multitaper
from ..utils import _reject_data_segments, verbose
@fill_doc
def plot_ica_sources(ica, inst, picks=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False, show_scrollbars=True,
time_format='float'):
"""Plot estimated latent sources given the unmixing matrix.
    Typical use cases:
1. plot evolution of latent sources over time based on (Raw input)
2. plot latent source around event related time windows (Epochs input)
3. plot time-locking in ICA space (Evoked input)
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of mne.io.Raw, mne.Epochs, mne.Evoked
The object to plot the sources from.
%(picks_base)s all sources in the order as fitted.
start : int | None
X-axis start index. If None (default), from the beginning.
stop : int | None
        X-axis stop index. If None (default), the next 20 seconds (Raw) or
        20 epochs (Epochs) are shown; for Evoked, data are shown to the end.
title : str | None
The window title. If None a default is provided.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for interactive selection of components in raw and epoch
plotter. For evoked, this parameter has no effect. Defaults to False.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
%(show_scrollbars)s
%(time_format)s
Returns
-------
fig : instance of Figure
The figure.
Notes
-----
For raw and epoch instances, it is possible to select components for
exclusion by clicking on the line. The selected components are added to
``ica.exclude`` on close.
.. versionadded:: 0.10.0
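    A minimal usage sketch (assuming ``raw`` is a preloaded Raw instance and
    the ICA has already been fitted on it):
        >>> from mne.preprocessing import ICA                   # doctest: +SKIP
        >>> ica = ICA(n_components=15).fit(raw)                 # doctest: +SKIP
        >>> fig = plot_ica_sources(ica, raw, picks=[0, 1, 2])   # doctest: +SKIP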
"""
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..epochs import BaseEpochs
exclude = ica.exclude
picks = _picks_to_idx(ica.n_components_, picks, 'all')
if isinstance(inst, (BaseRaw, BaseEpochs)):
fig = _plot_sources(ica, inst, picks, exclude, start=start, stop=stop,
show=show, title=title, block=block,
show_first_samp=show_first_samp,
show_scrollbars=show_scrollbars,
time_format=time_format)
elif isinstance(inst, Evoked):
if start is not None or stop is not None:
inst = inst.copy().crop(start, stop)
sources = ica.get_sources(inst)
fig = _plot_ica_sources_evoked(
evoked=sources, picks=picks, exclude=exclude, title=title,
labels=getattr(ica, 'labels_', None), show=show, ica=ica)
else:
        raise ValueError('Data input must be of Raw, Epochs or Evoked type')
return fig
def _create_properties_layout(figsize=None, fig=None):
"""Create main figure and axes layout used by plot_ica_properties."""
import matplotlib.pyplot as plt
if fig is not None and figsize is not None:
raise ValueError('Cannot specify both fig and figsize.')
if figsize is None:
figsize = [7., 6.]
if fig is None:
fig = plt.figure(figsize=figsize, facecolor=[0.95] * 3)
axes_params = (('topomap', [0.08, 0.5, 0.3, 0.45]),
('image', [0.5, 0.6, 0.45, 0.35]),
('erp', [0.5, 0.5, 0.45, 0.1]),
('spectrum', [0.08, 0.1, 0.32, 0.3]),
('variance', [0.5, 0.1, 0.45, 0.25]))
axes = [fig.add_axes(loc, label=name) for name, loc in axes_params]
return fig, axes
def _plot_ica_properties(pick, ica, inst, psds_mean, freqs, n_trials,
epoch_var, plot_lowpass_edge, epochs_src,
set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes,
kind, dropped_indices):
"""Plot ICA properties (helper)."""
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from scipy.stats import gaussian_kde
topo_ax, image_ax, erp_ax, spec_ax, var_ax = axes
# plotting
# --------
# component topomap
_plot_ica_topomap(ica, pick, show=False, axes=topo_ax, **topomap_args)
# image and erp
# we create a new epoch with dropped rows
epoch_data = epochs_src.get_data()
epoch_data = np.insert(arr=epoch_data,
obj=(dropped_indices -
np.arange(len(dropped_indices))).astype(int),
values=0.0,
axis=0)
from ..epochs import EpochsArray
epochs_src = EpochsArray(epoch_data, epochs_src.info, tmin=epochs_src.tmin,
verbose=0)
plot_epochs_image(epochs_src, picks=pick, axes=[image_ax, erp_ax],
combine=None, colorbar=False, show=False,
**image_args)
# spectrum
spec_ax.plot(freqs, psds_mean, color='k')
if plot_std:
spec_ax.fill_between(freqs, psds_mean - spectrum_std[0],
psds_mean + spectrum_std[1],
color='k', alpha=.2)
if plot_lowpass_edge:
spec_ax.axvline(inst.info['lowpass'], lw=2, linestyle='--',
color='k', alpha=0.2)
# epoch variance
var_ax_divider = make_axes_locatable(var_ax)
hist_ax = var_ax_divider.append_axes("right", size="33%", pad="2.5%")
var_ax.scatter(range(len(epoch_var)), epoch_var, alpha=0.5,
facecolor=[0, 0, 0], lw=0)
# rejected epochs in red
var_ax.scatter(dropped_indices, epoch_var[dropped_indices],
alpha=1., facecolor=[1, 0, 0], lw=0)
# compute percentage of dropped epochs
var_percent = float(len(dropped_indices)) / float(len(epoch_var)) * 100.
    # histogram & kde
_, counts, _ = hist_ax.hist(epoch_var, orientation="horizontal",
color="k", alpha=.5)
# kde
ymin, ymax = hist_ax.get_ylim()
try:
kde = gaussian_kde(epoch_var)
except np.linalg.LinAlgError:
pass # singular: happens when there is nothing plotted
else:
x = np.linspace(ymin, ymax, 50)
kde_ = kde(x)
kde_ /= kde_.max() or 1.
kde_ *= hist_ax.get_xlim()[-1] * .9
hist_ax.plot(kde_, x, color="k")
hist_ax.set_ylim(ymin, ymax)
# aesthetics
# ----------
topo_ax.set_title(ica._ica_names[pick])
set_title_and_labels(image_ax, kind + ' image and ERP/ERF', [], kind)
# erp
set_title_and_labels(erp_ax, [], 'Time (s)', 'AU')
erp_ax.spines["right"].set_color('k')
erp_ax.set_xlim(epochs_src.times[[0, -1]])
# remove half of yticks if more than 5
yt = erp_ax.get_yticks()
if len(yt) > 5:
erp_ax.yaxis.set_ticks(yt[::2])
# remove xticks - erp plot shows xticks for both image and erp plot
image_ax.xaxis.set_ticks([])
yt = image_ax.get_yticks()
image_ax.yaxis.set_ticks(yt[1:])
image_ax.set_ylim([-0.5, n_trials + 0.5])
# spectrum
set_title_and_labels(spec_ax, 'Spectrum', 'Frequency (Hz)', psd_ylabel)
spec_ax.yaxis.labelpad = 0
spec_ax.set_xlim(freqs[[0, -1]])
ylim = spec_ax.get_ylim()
air = np.diff(ylim)[0] * 0.1
spec_ax.set_ylim(ylim[0] - air, ylim[1] + air)
image_ax.axhline(0, color='k', linewidth=.5)
# epoch variance
var_ax_title = 'Dropped segments: %.2f %%' % var_percent
set_title_and_labels(var_ax, var_ax_title, kind, 'Variance (AU)')
hist_ax.set_ylabel("")
hist_ax.set_yticks([])
set_title_and_labels(hist_ax, None, None, None)
return fig
def _get_psd_label_and_std(this_psd, dB, ica, num_std):
"""Handle setting up PSD for one component, for plot_ica_properties."""
psd_ylabel = _convert_psds(this_psd, dB, estimate='auto', scaling=1.,
unit='AU', first_dim='epoch')
psds_mean = this_psd.mean(axis=0)
diffs = this_psd - psds_mean
# the distribution of power for each frequency bin is highly
# skewed so we calculate std for values below and above average
# separately - this is used for fill_between shade
with warnings.catch_warnings(): # mean of empty slice
warnings.simplefilter('ignore')
spectrum_std = [
[np.sqrt((d[d < 0] ** 2).mean(axis=0)) for d in diffs.T],
[np.sqrt((d[d > 0] ** 2).mean(axis=0)) for d in diffs.T]]
spectrum_std = np.array(spectrum_std) * num_std
return psd_ylabel, psds_mean, spectrum_std
@verbose
def plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True, reject='auto',
reject_by_annotation=True, *, verbose=None):
"""Display component properties.
Properties include the topography, epochs image, ERP/ERF, power
spectrum, and epoch variance.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of Epochs or Raw
The data to use in plotting properties.
%(picks_base)s the first five sources.
        If more than one component is chosen in ``picks``, each one will be
        plotted in a separate figure.
axes : list of Axes | None
List of five matplotlib axes to use in plotting: [topomap_axis,
image_axis, erp_axis, spectrum_axis, variance_axis]. If None a new
figure with relevant axes is created. Defaults to None.
dB : bool
Whether to plot spectrum in dB. Defaults to True.
plot_std : bool | float
Whether to plot standard deviation/confidence intervals in ERP/ERF and
spectrum plots.
Defaults to True, which plots one standard deviation above/below for
the spectrum. If set to float allows to control how many standard
deviations are plotted for the spectrum. For example 2.5 will plot 2.5
standard deviation above/below.
For the ERP/ERF, by default, plot the 95 percent parametric confidence
interval is calculated. To change this, use ``ci`` in ``ts_args`` in
``image_args`` (see below).
topomap_args : dict | None
Dictionary of arguments to ``plot_topomap``. If None, doesn't pass any
additional arguments. Defaults to None.
image_args : dict | None
Dictionary of arguments to ``plot_epochs_image``. If None, doesn't pass
any additional arguments. Defaults to None.
psd_args : dict | None
Dictionary of arguments to ``psd_multitaper``. If None, doesn't pass
any additional arguments. Defaults to None.
figsize : array-like, shape (2,) | None
Allows to control size of the figure. If None, the figure size
defaults to [7., 6.].
show : bool
Show figure if True.
reject : 'auto' | dict | None
Allows to specify rejection parameters used to drop epochs
(or segments if continuous signal is passed as inst).
If None, no rejection is applied. The default is 'auto',
which applies the rejection parameters used when fitting
the ICA object.
%(reject_by_annotation_raw)s
.. versionadded:: 0.21.0
%(verbose)s
Returns
-------
fig : list
List of matplotlib figures.
Notes
-----
.. versionadded:: 0.13
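    A minimal sketch (assuming ``epochs`` is preloaded and ``ica`` was fitted
    on compatible data):
        >>> figs = ica.plot_properties(epochs, picks=[0, 1])    # doctest: +SKIP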
"""
return _fast_plot_ica_properties(ica, inst, picks=picks, axes=axes, dB=dB,
plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
figsize=figsize, show=show,
reject=reject,
reject_by_annotation=reject_by_annotation,
verbose=verbose, precomputed_data=None)
def _fast_plot_ica_properties(ica, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None,
image_args=None, psd_args=None, figsize=None,
show=True, reject='auto', precomputed_data=None,
reject_by_annotation=True, *, verbose=None):
"""Display component properties."""
from ..preprocessing import ICA
# input checks and defaults
# -------------------------
_validate_type(ica, ICA, "ica", "ICA")
_validate_type(plot_std, (bool, 'numeric'), 'plot_std')
if isinstance(plot_std, bool):
num_std = 1. if plot_std else 0.
else:
plot_std = True
num_std = float(plot_std)
# if no picks given - plot the first 5 components
limit = min(5, ica.n_components_) if picks is None else len(ica.ch_names)
picks = _picks_to_idx(ica.info, picks, 'all')[:limit]
if axes is None:
fig, axes = _create_properties_layout(figsize=figsize)
else:
if len(picks) > 1:
raise ValueError('Only a single pick can be drawn '
'to a set of axes.')
from .utils import _validate_if_list_of_axes
_validate_if_list_of_axes(axes, obligatory_len=5)
fig = axes[0].get_figure()
psd_args = dict() if psd_args is None else psd_args
topomap_args = dict() if topomap_args is None else topomap_args
image_args = dict() if image_args is None else image_args
image_args["ts_args"] = dict(truncate_xaxis=False, show_sensors=False)
if plot_std:
from ..stats.parametric import _parametric_ci
image_args["ts_args"]["ci"] = _parametric_ci
elif "ts_args" not in image_args or "ci" not in image_args["ts_args"]:
image_args["ts_args"]["ci"] = False
for item_name, item in (("psd_args", psd_args),
("topomap_args", topomap_args),
("image_args", image_args)):
_validate_type(item, dict, item_name, "dictionary")
if dB is not None:
_validate_type(dB, bool, "dB", "bool")
# calculations
# ------------
if isinstance(precomputed_data, tuple):
kind, dropped_indices, epochs_src, data = precomputed_data
else:
kind, dropped_indices, epochs_src, data = _prepare_data_ica_properties(
inst, ica, reject_by_annotation, reject)
ica_data = np.swapaxes(data[:, picks, :], 0, 1)
dropped_src = ica_data
# spectrum
Nyquist = inst.info['sfreq'] / 2.
lp = inst.info['lowpass']
if 'fmax' not in psd_args:
psd_args['fmax'] = min(lp * 1.25, Nyquist)
plot_lowpass_edge = lp < Nyquist and (psd_args['fmax'] > lp)
psds, freqs = psd_multitaper(epochs_src, picks=picks, **psd_args)
def set_title_and_labels(ax, title, xlab, ylab):
if title:
ax.set_title(title)
if xlab:
ax.set_xlabel(xlab)
if ylab:
ax.set_ylabel(ylab)
ax.axis('auto')
ax.tick_params('both', labelsize=8)
ax.axis('tight')
# plot
# ----
all_fig = list()
for idx, pick in enumerate(picks):
# calculate component-specific spectrum stuff
psd_ylabel, psds_mean, spectrum_std = _get_psd_label_and_std(
psds[:, idx, :].copy(), dB, ica, num_std)
# if more than one component, spawn additional figures and axes
if idx > 0:
fig, axes = _create_properties_layout(figsize=figsize)
# we reconstruct an epoch_variance with 0 where indexes where dropped
epoch_var = np.var(ica_data[idx], axis=1)
drop_var = np.var(dropped_src[idx], axis=1)
drop_indices_corrected = \
(dropped_indices -
np.arange(len(dropped_indices))).astype(int)
epoch_var = np.insert(arr=epoch_var,
obj=drop_indices_corrected,
values=drop_var[dropped_indices],
axis=0)
# the actual plot
fig = _plot_ica_properties(
pick, ica, inst, psds_mean, freqs, ica_data.shape[1],
epoch_var, plot_lowpass_edge,
epochs_src, set_title_and_labels, plot_std, psd_ylabel,
spectrum_std, topomap_args, image_args, fig, axes, kind,
dropped_indices)
all_fig.append(fig)
plt_show(show)
return all_fig
def _prepare_data_ica_properties(inst, ica, reject_by_annotation=True,
reject='auto'):
"""Prepare Epochs sources to plot ICA properties.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
inst : instance of Epochs or Raw
The data to use in plotting properties.
    reject_by_annotation : bool, optional
        Whether to omit data segments that are annotated as bad.
        Defaults to True.
    reject : 'auto' | dict | None, optional
        Rejection parameters used to drop epochs (or segments of continuous
        data). If 'auto', the rejection parameters stored in the ICA object
        are used. Defaults to 'auto'.
Returns
-------
kind : str
"Segment" for BaseRaw and "Epochs" for BaseEpochs
dropped_indices : list
Dropped epochs indexes.
epochs_src : instance of Epochs
Segmented data of ICA sources.
data : array of shape (n_epochs, n_ica_sources, n_times)
A view on epochs ICA sources data.
"""
from ..io.base import BaseRaw
from ..io import RawArray
from ..epochs import BaseEpochs
_validate_type(inst, (BaseRaw, BaseEpochs), "inst", "Raw or Epochs")
if isinstance(inst, BaseRaw):
# when auto, delegate reject to the ica
from ..epochs import make_fixed_length_epochs
if reject == 'auto':
reject = getattr(ica, 'reject_', None)
if reject is None:
drop_inds = None
dropped_indices = []
# break up continuous signal into segments
epochs_src = make_fixed_length_epochs(
ica.get_sources(inst),
duration=2,
preload=True,
reject_by_annotation=reject_by_annotation,
proj=False,
verbose=False)
else:
data = inst.get_data()
data, drop_inds = _reject_data_segments(data, ica.reject_,
flat=None, decim=None,
info=inst.info,
tstep=2.0)
inst_rejected = RawArray(data, inst.info)
# break up continuous signal into segments
epochs_src = make_fixed_length_epochs(
ica.get_sources(inst_rejected),
duration=2,
preload=True,
reject_by_annotation=reject_by_annotation,
proj=False,
verbose=False)
# getting dropped epochs indexes
dropped_indices = [(d[0] // len(epochs_src.times)) + 1
for d in drop_inds]
kind = "Segment"
else:
drop_inds = None
epochs_src = ica.get_sources(inst)
dropped_indices = []
kind = "Epochs"
return kind, dropped_indices, epochs_src, epochs_src.get_data()
def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica,
labels=None):
"""Plot average over epochs in ICA space.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked to be used.
%(picks_base)s all sources in the order as fitted.
exclude : array-like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
title : str
The figure title.
show : bool
Show figure if True.
labels : None | dict
The ICA labels attribute.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
if title is None:
title = 'Reconstructed latent sources, time-locked'
fig, axes = plt.subplots(1)
ax = axes
axes = [axes]
times = evoked.times * 1e3
# plot unclassified sources and label excluded ones
lines = list()
texts = list()
picks = np.sort(picks)
idxs = [picks]
if labels is not None:
labels_used = [k for k in labels if '/' not in k]
exclude_labels = list()
for ii in picks:
if ii in exclude:
line_label = ica._ica_names[ii]
if labels is not None:
annot = list()
for this_label in labels_used:
indices = labels[this_label]
if ii in indices:
annot.append(this_label)
line_label += (' - ' + ', '.join(annot))
exclude_labels.append(line_label)
else:
exclude_labels.append(None)
label_props = [('k', '-') if lb is None else ('r', '-') for lb in
exclude_labels]
styles = ['-', '--', ':', '-.']
if labels is not None:
# differentiate categories by linestyle and components by color
col_lbs = [it for it in exclude_labels if it is not None]
cmap = plt.get_cmap('tab10', len(col_lbs))
unique_labels = {k.split(' - ')[1] for k in exclude_labels if k}
# Determine up to 4 different styles for n categories
cat_styles = dict(zip(unique_labels,
map(lambda ux: styles[int(ux % len(styles))],
range(len(unique_labels)))))
for lb_idx, lb in enumerate(exclude_labels):
if lb is not None:
color = cmap(col_lbs.index(lb))
style = cat_styles[lb[lb.find(' - ') + 3:]]
label_props[lb_idx] = (color, style)
for exc_label, ii in zip(exclude_labels, picks):
color, style = label_props[ii]
lines.extend(ax.plot(times, evoked.data[ii].T, picker=True,
zorder=2, color=color, linestyle=style,
label=exc_label))
lines[-1].set_pickradius(3.)
ax.set(title=title, xlim=times[[0, -1]], xlabel='Time (ms)', ylabel='(NA)')
if len(exclude) > 0:
plt.legend(loc='best')
tight_layout(fig=fig)
texts.append(ax.text(0, 0, '', zorder=3,
verticalalignment='baseline',
horizontalalignment='left',
fontweight='bold', alpha=0))
# this is done to give the structure of a list of lists of a group of lines
# in each subplot
lines = [lines]
ch_names = evoked.ch_names
path_effects = [patheffects.withStroke(linewidth=2, foreground="w",
alpha=0.75)]
params = dict(axes=axes, texts=texts, lines=lines, idxs=idxs,
ch_names=ch_names, need_draw=False,
path_effects=path_effects)
fig.canvas.mpl_connect('pick_event',
partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect('button_press_event',
partial(_butterfly_on_button_press,
params=params))
plt_show(show)
return fig
def plot_ica_scores(ica, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None,
n_cols=None, show=True):
"""Plot scores related to detected components.
    Use this function to assess how well your score describes outlier
    sources and how well you were detecting them.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
scores : array-like of float, shape (n_ica_components,) | list of array
Scores based on arbitrary metric to characterize ICA components.
exclude : array-like of int
The components marked for exclusion. If None (default), ICA.exclude
will be used.
labels : str | list | 'ecg' | 'eog' | None
The labels to consider for the axes tests. Defaults to None.
If list, should match the outer shape of ``scores``.
If 'ecg' or 'eog', the ``labels_`` attributes will be looked up.
Note that '/' is used internally for sublabels specifying ECG and
EOG channels.
axhline : float
Draw horizontal line to e.g. visualize rejection threshold.
title : str
The figure title.
figsize : tuple of int | None
The figure size. If None it gets set automatically.
n_cols : int | None
Scores are plotted in a grid. This parameter controls how
many to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
The figure object.
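    A hypothetical sketch, assuming ``ica`` was fitted on ``raw`` and scored
    against an EOG channel:
        >>> eog_inds, scores = ica.find_bads_eog(raw)               # doctest: +SKIP
        >>> fig = plot_ica_scores(ica, scores, exclude=eog_inds)    # doctest: +SKIP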
"""
import matplotlib.pyplot as plt
my_range = np.arange(ica.n_components_)
if exclude is None:
exclude = ica.exclude
exclude = np.unique(exclude)
if not isinstance(scores[0], (list, np.ndarray)):
scores = [scores]
n_scores = len(scores)
if n_cols is None:
# prefer more rows.
n_rows = int(np.ceil(np.sqrt(n_scores)))
n_cols = (n_scores - 1) // n_rows + 1
else:
n_cols = min(n_scores, n_cols)
n_rows = (n_scores - 1) // n_cols + 1
if figsize is None:
figsize = (6.4 * n_cols, 2.7 * n_rows)
fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize,
sharex=True, sharey=True)
if isinstance(axes, np.ndarray):
axes = axes.flatten()
else:
axes = [axes]
fig.suptitle(title)
if labels == 'ecg':
labels = [label for label in ica.labels_ if label.startswith('ecg/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
if len(labels) == 0:
labels = [label for label in ica.labels_ if
label.startswith('ecg')]
elif labels == 'eog':
labels = [label for label in ica.labels_ if label.startswith('eog/')]
labels.sort(key=lambda l: l.split('/')[1]) # sort by index
if len(labels) == 0:
labels = [label for label in ica.labels_ if
label.startswith('eog')]
elif isinstance(labels, str):
labels = [labels]
elif labels is None:
labels = (None,) * n_scores
if len(labels) != n_scores:
raise ValueError('Need as many labels (%i) as scores (%i)'
% (len(labels), n_scores))
for label, this_scores, ax in zip(labels, scores, axes):
if len(my_range) != len(this_scores):
raise ValueError('The length of `scores` must equal the '
'number of ICA components.')
ax.bar(my_range, this_scores, color='gray', edgecolor='k')
for excl in exclude:
ax.bar(my_range[excl], this_scores[excl], color='r', edgecolor='k')
if axhline is not None:
if np.isscalar(axhline):
axhline = [axhline]
for axl in axhline:
ax.axhline(axl, color='r', linestyle='--')
ax.set_ylabel('score')
if label is not None:
if 'eog/' in label:
split = label.split('/')
label = ', '.join([split[0], split[2]])
elif '/' in label:
label = ', '.join(label.split('/'))
ax.set_title('(%s)' % label)
ax.set_xlabel('ICA components')
ax.set_xlim(-0.6, len(this_scores) - 0.4)
tight_layout(fig=fig)
adjust_top = 0.8 if len(fig.axes) == 1 else 0.9
fig.subplots_adjust(top=adjust_top)
fig.canvas.draw()
plt_show(show)
return fig
@fill_doc
def plot_ica_overlay(ica, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True, n_pca_components=None):
"""Overlay of raw and cleaned signals given the unmixing matrix.
This method helps visualizing signal quality and artifact rejection.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
inst : instance of mne.io.Raw or mne.Evoked
The signals to be compared given the ICA solution. If Raw input,
The raw data are displayed before and after cleaning. In a second
panel the cross channel average will be displayed. Since dipolar
sources will be canceled out this display is sensitive to
artifacts. If evoked input, butterfly plots for clean and raw
signals will be superimposed.
exclude : array-like of int | None (default)
The components marked for exclusion. If None (default), ICA.exclude
will be used.
%(picks_base)s all channels that were included during fitting.
start : int | None
X-axis start index. If None (default) from the beginning.
stop : int | None
X-axis stop index. If None (default) to 3.0s.
title : str
The figure title.
show : bool
Show figure if True.
%(n_pca_components_apply)s
.. versionadded:: 0.22
Returns
-------
fig : instance of Figure
The figure.
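    A minimal sketch (assuming ``raw`` is preloaded, ``ica`` is fitted and
    component 0 was marked as artifactual):
        >>> fig = plot_ica_overlay(ica, raw, exclude=[0])   # doctest: +SKIP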
"""
# avoid circular imports
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..preprocessing.ica import _check_start_stop
_validate_type(inst, (BaseRaw, Evoked), "inst", "Raw or Evoked")
if title is None:
title = 'Signals before (red) and after (black) cleaning'
picks = ica.ch_names if picks is None else picks
picks = _picks_to_idx(inst.info, picks, exclude=())
ch_types_used = inst.get_channel_types(picks=picks, unique=True)
if exclude is None:
exclude = ica.exclude
if not isinstance(exclude, (np.ndarray, list)):
raise TypeError('exclude must be of type list. Got %s'
% type(exclude))
if isinstance(inst, BaseRaw):
if start is None:
start = 0.0
if stop is None:
stop = 3.0
start_compare, stop_compare = _check_start_stop(inst, start, stop)
data, times = inst[picks, start_compare:stop_compare]
raw_cln = ica.apply(inst.copy(), exclude=exclude,
start=start, stop=stop,
n_pca_components=n_pca_components)
data_cln, _ = raw_cln[picks, start_compare:stop_compare]
fig = _plot_ica_overlay_raw(data=data, data_cln=data_cln,
times=times, title=title,
ch_types_used=ch_types_used, show=show)
else:
assert isinstance(inst, Evoked)
inst = inst.copy().crop(start, stop)
if picks is not None:
inst.info['comps'] = [] # can be safely disabled
inst.pick_channels([inst.ch_names[p] for p in picks])
evoked_cln = ica.apply(inst.copy(), exclude=exclude,
n_pca_components=n_pca_components)
fig = _plot_ica_overlay_evoked(evoked=inst, evoked_cln=evoked_cln,
title=title, show=show)
return fig
def _plot_ica_overlay_raw(data, data_cln, times, title, ch_types_used, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA object.
epochs : instance of mne.Epochs
The Epochs to be regarded.
show : bool
Show figure if True.
Returns
-------
fig : instance of Figure
"""
import matplotlib.pyplot as plt
# Restore sensor space data and keep all PCA components
    # let's now compare the data before and after cleaning.
# first the raw data
assert data.shape == data_cln.shape
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
plt.suptitle(title)
ax1.plot(times, data.T, color='r')
ax1.plot(times, data_cln.T, color='k')
ax1.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Raw data')
_ch_types = {'mag': 'Magnetometers',
'grad': 'Gradiometers',
'eeg': 'EEG'}
ch_types = ', '.join([_ch_types[k] for k in ch_types_used])
ax2.set_title('Average across channels ({})'.format(ch_types))
ax2.plot(times, data.mean(0), color='r')
ax2.plot(times, data_cln.mean(0), color='k')
ax2.set(xlabel='Time (s)', xlim=times[[0, -1]])
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show):
"""Plot evoked after and before ICA cleaning.
Parameters
----------
evoked : instance of mne.Evoked
The Evoked before IC rejection.
evoked_cln : instance of mne.Evoked
The Evoked after IC rejection.
title : str | None
The title of the figure.
show : bool
If True, all open plots will be shown.
Returns
-------
fig : instance of Figure
"""
import matplotlib.pyplot as plt
ch_types_used = [c for c in ['mag', 'grad', 'eeg'] if c in evoked]
n_rows = len(ch_types_used)
ch_types_used_cln = [c for c in ['mag', 'grad', 'eeg'] if
c in evoked_cln]
if len(ch_types_used) != len(ch_types_used_cln):
raise ValueError('Raw and clean evokeds must match. '
'Found different channels.')
fig, axes = plt.subplots(n_rows, 1)
if title is None:
title = 'Average signal before (red) and after (black) ICA'
fig.suptitle(title)
axes = axes.flatten() if isinstance(axes, np.ndarray) else axes
evoked.plot(axes=axes, show=False, time_unit='s')
for ax in fig.axes:
for line in ax.get_lines():
line.set_color('r')
fig.canvas.draw()
evoked_cln.plot(axes=axes, show=False, time_unit='s')
tight_layout(fig=fig)
fig.subplots_adjust(top=0.90)
fig.canvas.draw()
plt_show(show)
return fig
def _plot_sources(ica, inst, picks, exclude, start, stop, show, title, block,
show_scrollbars, show_first_samp, time_format):
"""Plot the ICA components as a RawArray or EpochsArray."""
from ._figure import _browse_figure
from .. import EpochsArray, BaseEpochs
from ..io import RawArray, BaseRaw
# handle defaults / check arg validity
is_raw = isinstance(inst, BaseRaw)
is_epo = isinstance(inst, BaseEpochs)
sfreq = inst.info['sfreq']
color = _handle_default('color', (0., 0., 0.))
units = _handle_default('units', None)
scalings = (_compute_scalings(None, inst) if is_raw else
_handle_default('scalings_plot_raw'))
scalings['misc'] = 5.
scalings['whitened'] = 1.
unit_scalings = _handle_default('scalings', None)
# data
if is_raw:
data = ica._transform_raw(inst, 0, len(inst.times))[picks]
else:
data = ica._transform_epochs(inst, concatenate=True)[picks]
# events
if is_epo:
event_id_rev = {v: k for k, v in inst.event_id.items()}
event_nums = inst.events[:, 2]
event_color_dict = _make_event_color_dict(None, inst.events,
inst.event_id)
# channel properties / trace order / picks
ch_names = list(ica._ica_names) # copy
ch_types = ['misc' for _ in picks]
# add EOG/ECG channels if present
eog_chs = pick_types(inst.info, meg=False, eog=True, ref_meg=False)
ecg_chs = pick_types(inst.info, meg=False, ecg=True, ref_meg=False)
for eog_idx in eog_chs:
ch_names.append(inst.ch_names[eog_idx])
ch_types.append('eog')
for ecg_idx in ecg_chs:
ch_names.append(inst.ch_names[ecg_idx])
ch_types.append('ecg')
extra_picks = np.concatenate((eog_chs, ecg_chs)).astype(int)
if len(extra_picks):
if is_raw:
eog_ecg_data, _ = inst[extra_picks, :]
else:
eog_ecg_data = np.concatenate(inst.get_data(extra_picks), axis=1)
data = np.append(data, eog_ecg_data, axis=0)
picks = np.concatenate(
(picks, ica.n_components_ + np.arange(len(extra_picks))))
ch_order = np.arange(len(picks))
n_channels = min([20, len(picks)])
ch_names_picked = [ch_names[x] for x in picks]
# create info
info = create_info(ch_names_picked, sfreq, ch_types=ch_types)
info['meas_date'] = inst.info['meas_date']
info['bads'] = [ch_names[x] for x in exclude if x in picks]
if is_raw:
inst_array = RawArray(data, info, inst.first_samp)
inst_array.set_annotations(inst.annotations)
else:
data = data.reshape(-1, len(inst), len(inst.times)).swapaxes(0, 1)
inst_array = EpochsArray(data, info)
# handle time dimension
start = 0 if start is None else start
_last = inst.times[-1] if is_raw else len(inst.events)
stop = min(start + 20, _last) if stop is None else stop
first_time = inst._first_time if show_first_samp else 0
if is_raw:
duration = stop - start
start += first_time
else:
n_epochs = stop - start
total_epochs = len(inst)
epoch_n_times = len(inst.times)
n_epochs = min(n_epochs, total_epochs)
n_times = total_epochs * epoch_n_times
duration = n_epochs * epoch_n_times / sfreq
event_times = (np.arange(total_epochs) * epoch_n_times
+ inst.time_as_index(0)) / sfreq
# NB: this includes start and end of data:
boundary_times = np.arange(total_epochs + 1) * epoch_n_times / sfreq
if duration <= 0:
raise RuntimeError('Stop must be larger than start.')
# misc
bad_color = (0.8, 0.8, 0.8)
title = 'ICA components' if title is None else title
params = dict(inst=inst_array,
ica=ica,
ica_inst=inst,
info=info,
# channels and channel order
ch_names=np.array(ch_names_picked),
ch_types=np.array(ch_types),
ch_order=ch_order,
picks=picks,
n_channels=n_channels,
picks_data=list(),
# time
t_start=start if is_raw else boundary_times[start],
duration=duration,
n_times=inst.n_times if is_raw else n_times,
first_time=first_time,
time_format=time_format,
decim=1,
# events
event_times=None if is_raw else event_times,
# preprocessing
projs=list(),
projs_on=np.array([], dtype=bool),
apply_proj=False,
remove_dc=True, # for EOG/ECG
filter_coefs=None,
filter_bounds=None,
noise_cov=None,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=bad_color,
ch_color_dict=color,
# display
butterfly=False,
clipping=None,
scrollbars_visible=show_scrollbars,
scalebars_visible=False,
window_title=title)
if is_epo:
params.update(n_epochs=n_epochs,
boundary_times=boundary_times,
event_id_rev=event_id_rev,
event_color_dict=event_color_dict,
event_nums=event_nums,
epoch_color_bad=(1, 0, 0),
epoch_colors=None,
xlabel='Epoch number')
fig = _browse_figure(**params)
fig._update_picks()
# update data, and plot
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
# plot annotations (if any)
if is_raw:
fig._setup_annotation_colors()
fig._update_annotation_segments()
fig._draw_annotations()
plt_show(show, block=block)
return fig
| bsd-3-clause |
dandanvidi/in-vivo-enzyme-kinetics | scripts/handle_fluxomics.py | 3 | 7038 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 17:15:01 2016
@author: dan
"""
import re, pulp
import pandas as pd
import matplotlib.pyplot as plt
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
#from ..scripts.despine_axes import despine
def despine(ax, fontsize=15):
ax.tick_params(right=0, top=0, direction='out', labelsize=fontsize)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel(ax.get_xlabel(), size=15)
ax.set_ylabel(ax.get_ylabel(), size=15)
#%%
rid_mapping = pd.DataFrame.from_csv("../source/rid_mapping_cobra_2_Gerosa.csv")
MFA = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_[Gerosa et al 2015].csv',
index_col=1)
MFA_std = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_stdev_[Gerosa et al 2015].csv',
index_col=1)
conditions = pd.DataFrame.from_csv("../data/conditions.csv")
conditions = conditions[conditions.media_key>0]
conditions.sort_values('growth rate Gerosa [h-1]', inplace=True)
cs = conditions.index
#%%
measured_flux = pd.DataFrame(columns=cs, index=rid_mapping.index)
measured_flux_stdev = pd.DataFrame(columns=cs, index=rid_mapping.index)
for row in MFA.iterrows():
if not re.findall("[+-]", row[0]):
for r in row[0].split(';'):
cobra_reactions = rid_mapping[rid_mapping['gerosa_reaction_id']==r]
for r_cobra in cobra_reactions.index:
v = row[1]
measured_flux.loc[r_cobra] = v
measured_flux_stdev.loc[r_cobra] = MFA_std.loc[row[0]]
measured_flux.dropna(inplace=True)
measured_flux_stdev.dropna(inplace=True)
#%%
model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
all_reactions = map(str, model.reactions)
all_metabolites = map(str, model.metabolites)
mmol_gCDW_h = pd.DataFrame(columns=cs, index=measured_flux.index)
for c in cs:
cobra_c = conditions.loc[c, 'media_key']
gr = conditions.loc[c, 'growth rate Gerosa [h-1]']
flux_meas = measured_flux[c]
flux_stderr = measured_flux_stdev[c]
# load fresh copy of model
model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
# redefine sole carbon source uptake reaction in mmol/gr/h
model.reactions.get_by_id('EX_glc_e').lower_bound = 0
model.reactions.get_by_id('EX_' + cobra_c + '_e').lower_bound = -1000
# set growth rate according to measurements
biomass = "Ec_biomass_iJO1366_WT_53p95M"
growth_rate = model.reactions.get_by_id(biomass)
growth_rate.upper_bound = gr
growth_rate.lower_bound = gr
bounds_df = pd.DataFrame(index=all_reactions,columns=['lb','ub'])
m = model.to_array_based_model()
bounds_df.loc[all_reactions, 'lb'] = m.lower_bounds
bounds_df.loc[all_reactions, 'ub'] = m.upper_bounds
# initialize LP problem
pulp_solver = pulp.CPLEX(msg=0)
lp = pulp.LpProblem("MOMA", pulp.LpMinimize)
v_pred = pulp.LpVariable.dicts('v_pred', all_reactions)
v_meas = pulp.LpVariable.dicts('v_meas', all_reactions)
v_resid = pulp.LpVariable.dicts('residual', all_reactions)
# add flux bounds
for i in all_reactions:
lp += (v_pred[i] >= bounds_df.loc[i, 'lb']), 'lower_bound_%s' % i
lp += (v_pred[i] <= bounds_df.loc[i, 'ub']), 'upper_bound_%s' % i
# add constraint for each measured reaction i:
# |v_meas[i] - flux_meas[i]| <= flux_stderr[i]
# v_resid[i] >= |v_pred[i] - v_meas[i]|
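    # (the absolute value is linearized in the standard LP way: the two
    #  inequalities v_pred - v_resid <= v_meas and -v_pred - v_resid <= -v_meas
    #  together force v_resid >= |v_pred - v_meas| once sum(v_resid) is minimized)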
for i in flux_meas.index:
lp += (v_meas[i] <= flux_meas[i] + flux_stderr[i]), 'measured_upper_%s' % i
lp += (v_meas[i] >= flux_meas[i] - flux_stderr[i]), 'measured_lower_%s' % i
lp += (v_pred[i] - v_resid[i] <= v_meas[i]), 'abs_diff_upper_%s' % i
lp += (-v_pred[i] - v_resid[i] <= -v_meas[i]), 'abs_diff_lower_%s' % i
# Some reactions in Gerosa et al. 2015 share constraints with other reactions
    # here we manually constrain their fluxes according to measurements.
# Acetate exchange
lp += (v_meas['ACt2rpp'] + v_meas['ACS'] <= MFA.loc['PTAr+ACS', c] + MFA_std.loc['PTAr+ACS', c])
lp += (v_meas['ACt2rpp'] + v_meas['ACS'] >= MFA.loc['PTAr+ACS', c] - MFA_std.loc['PTAr+ACS', c])
# PFK/FBP reversible reaction
lp += (v_meas['PFK'] - v_meas['FBP'] <= MFA.loc['PFK-FBP', c] + MFA_std.loc['PFK-FBP', c])
lp += (v_meas['PFK'] - v_meas['FBP'] >= MFA.loc['PFK-FBP', c] - MFA_std.loc['PFK-FBP', c])
# MDH/MQO alternative
lp += (v_meas['MDH'] + v_meas['MDH2'] <= MFA.loc['MDH+MQO', c] + MFA_std.loc['MDH+MQO', c])
lp += (v_meas['MDH'] + v_meas['MDH2'] >= MFA.loc['MDH+MQO', c] - MFA_std.loc['MDH+MQO', c])
# ME alternative
lp += (v_meas['ME1'] + v_meas['ME2'] <= MFA.loc['ME1+ME2', c] + MFA_std.loc['ME1+ME2', c])
lp += (v_meas['ME1'] + v_meas['ME2'] >= MFA.loc['ME1+ME2', c] - MFA_std.loc['ME1+ME2', c])
# set the objective to minimize sum_i abs_diff[i]
objective = pulp.lpSum(v_resid.values())
lp.setObjective(objective)
# add stoichiometric constraints for all internal metabolites: S_int * v = 0
for i,j in enumerate(m.S):
row = [l * v_pred[all_reactions[k]] for k,l in zip(j.rows[0],j.data[0])]
lp += (pulp.lpSum(row) == 0), 'mass_balance_%s' % all_metabolites[i]
lp.solve()
# append fluxes to new dataframe
MEAS_FLUX_L = 'measured fluxes from Gerosa et al.'
MEAS_STDEV_L = 'standard deviation'
PRED_FLUX_L = 'projected fluxes'
RESID_L = 'residual'
fluxes_df = pd.DataFrame(index=all_reactions)
fluxes_df.loc[flux_meas.index, MEAS_FLUX_L] = flux_meas
fluxes_df.loc[flux_meas.index, MEAS_STDEV_L] = flux_stderr
fluxes_df.loc[all_reactions, PRED_FLUX_L] = \
map(lambda i: pulp.value(v_pred[i]), all_reactions)
fluxes_df.loc[measured_flux.index, RESID_L] = \
map(lambda i: pulp.value(v_resid[i]), measured_flux.index)
mmol_gCDW_h[c] = fluxes_df.loc[measured_flux.index, PRED_FLUX_L]
#%%
# normalize all fluxes to the biomass flux (i.e. set it to 1)
fluxes_df /= pulp.value(v_pred[biomass])
fig = plt.figure(figsize=(6,6))
ax = plt.axes()
fluxes_df.plot(kind='scatter', x=MEAS_FLUX_L, y=PRED_FLUX_L,
xerr=MEAS_STDEV_L, ax=ax, linewidth=0, s=20,
color=(0.7,0.2,0.5))
xlim, ylim = (ax.get_ylim(), ax.get_ylim())
plt.axis('equal')
plt.plot(xlim, ylim)
plt.xlim(xlim)
plt.ylim(ylim)
despine(ax)
ax.set_title(c, size=15)
for i in flux_meas.index:
xy = fluxes_df.loc[i, [MEAS_FLUX_L, PRED_FLUX_L]]
if fluxes_df.loc[i, RESID_L] > 2:
ax.annotate(i, xy,
fontsize=10, color='darkslategrey')
fig.savefig('../res/flux_projections/flux_projection_on_%s.pdf' %c)
mmol_gCDW_h.to_csv('../data/flux projections[mmol_gCDW_h].csv')
| mit |
olologin/scikit-learn | sklearn/neighbors/classification.py | 17 | 14354 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
        Label that is assigned to outlier samples (samples with no neighbors
        within the given radius). If set to None, a ValueError is raised when
        an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
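
# A minimal usage sketch of the outlier handling implemented in predict()
# above (the data, radius and outlier_label values below are made up):
#
#     from sklearn.neighbors import RadiusNeighborsClassifier
#     X, y = [[0], [1], [2], [3]], [0, 0, 1, 1]
#     clf = RadiusNeighborsClassifier(radius=0.5, outlier_label=-1)
#     clf.fit(X, y)
#     clf.predict([[10.0]])  # no neighbors within 0.5 -> labelled -1
#
# With outlier_label=None (the default), the same query raises a ValueError.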
| bsd-3-clause |
kris-singh/pgmpy | pgmpy/inference/ExactInference.py | 3 | 31429 | #!/usr/bin/env python3
import copy
import itertools
import networkx as nx
import numpy as np
from pgmpy.extern.six.moves import filter, range
from pgmpy.extern.six import string_types
from pgmpy.factors.discrete import factor_product
from pgmpy.inference import Inference
from pgmpy.models import JunctionTree
from pgmpy.utils import StateNameDecorator
class VariableElimination(Inference):
@StateNameDecorator(argument='evidence', return_val=None)
def _variable_elimination(self, variables, operation, evidence=None, elimination_order=None):
"""
Implementation of a generalized variable elimination.
Parameters
----------
variables: list, array-like
variables that are not to be eliminated.
operation: str ('marginalize' | 'maximize')
The operation to do for eliminating the variable.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list, array-like
list of variables representing the order in which they
are to be eliminated. If None order is computed automatically.
"""
if isinstance(variables, string_types):
raise TypeError("variables must be a list of strings")
if isinstance(evidence, string_types):
raise TypeError("evidence must be a list of strings")
# Dealing with the case when variables is not provided.
if not variables:
all_factors = []
for factor_li in self.factors.values():
all_factors.extend(factor_li)
return set(all_factors)
eliminated_variables = set()
working_factors = {node: {factor for factor in self.factors[node]}
for node in self.factors}
# Dealing with evidence. Reducing factors over it before VE is run.
if evidence:
for evidence_var in evidence:
for factor in working_factors[evidence_var]:
factor_reduced = factor.reduce([(evidence_var, evidence[evidence_var])], inplace=False)
for var in factor_reduced.scope():
working_factors[var].remove(factor)
working_factors[var].add(factor_reduced)
del working_factors[evidence_var]
# TODO: Modify it to find the optimal elimination order
if not elimination_order:
elimination_order = list(set(self.variables) -
set(variables) -
set(evidence.keys() if evidence else []))
elif any(var in elimination_order for var in
set(variables).union(set(evidence.keys() if evidence else []))):
raise ValueError("Elimination order contains variables which are in"
" variables or evidence args")
for var in elimination_order:
# Removing all the factors containing the variables which are
# eliminated (as all the factors should be considered only once)
factors = [factor for factor in working_factors[var]
if not set(factor.variables).intersection(eliminated_variables)]
phi = factor_product(*factors)
phi = getattr(phi, operation)([var], inplace=False)
del working_factors[var]
for variable in phi.variables:
working_factors[variable].add(phi)
eliminated_variables.add(var)
final_distribution = set()
for node in working_factors:
factors = working_factors[node]
for factor in factors:
if not set(factor.variables).intersection(eliminated_variables):
final_distribution.add(factor)
query_var_factor = {}
for query_var in variables:
phi = factor_product(*final_distribution)
query_var_factor[query_var] = phi.marginalize(list(set(variables) -
set([query_var])),
inplace=False).normalize(inplace=False)
return query_var_factor
def query(self, variables, evidence=None, elimination_order=None):
"""
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.query(['A', 'B'])
"""
return self._variable_elimination(variables, 'marginalize',
evidence=evidence, elimination_order=elimination_order)
def max_marginal(self, variables=None, evidence=None, elimination_order=None):
"""
Computes the max-marginal over the variables given the evidence.
Parameters
----------
variables: list
list of variables over which we want to compute the max-marginal.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import VariableElimination
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.max_marginal(['A', 'B'])
"""
if not variables:
variables = []
final_distribution = self._variable_elimination(variables, 'maximize',
evidence=evidence,
elimination_order=elimination_order)
# To handle the case when no argument is passed then
# _variable_elimination returns a dict.
if isinstance(final_distribution, dict):
final_distribution = final_distribution.values()
return np.max(factor_product(*final_distribution).values)
@StateNameDecorator(argument=None, return_val=True)
def map_query(self, variables=None, evidence=None, elimination_order=None):
"""
Computes the MAP Query over the variables given the evidence.
Parameters
----------
variables: list
list of variables over which we want to compute the max-marginal.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
elimination_order: list
order of variable eliminations (if nothing is provided) order is
computed automatically
Examples
--------
>>> from pgmpy.inference import VariableElimination
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> phi_query = inference.map_query(['A', 'B'])
"""
elimination_variables = set(self.variables) - set(evidence.keys()) if evidence else set()
final_distribution = self._variable_elimination(elimination_variables, 'maximize',
evidence=evidence,
elimination_order=elimination_order)
# To handle the case when no argument is passed then
# _variable_elimination returns a dict.
if isinstance(final_distribution, dict):
final_distribution = final_distribution.values()
distribution = factor_product(*final_distribution)
argmax = np.argmax(distribution.values)
assignment = distribution.assignment([argmax])[0]
map_query_results = {}
for var_assignment in assignment:
var, value = var_assignment
map_query_results[var] = value
if not variables:
return map_query_results
else:
return_dict = {}
for var in variables:
return_dict[var] = map_query_results[var]
return return_dict
def induced_graph(self, elimination_order):
"""
Returns the induced graph formed by running Variable Elimination on the network.
Parameters
----------
elimination_order: list, array like
List of variables in the order in which they are to be eliminated.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import VariableElimination
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> inference.induced_graph(['C', 'D', 'A', 'B', 'E'])
<networkx.classes.graph.Graph at 0x7f34ac8c5160>
"""
# If the elimination order does not contain the same variables as the model
if set(elimination_order) != set(self.variables):
raise ValueError("Set of variables in elimination order"
" different from variables in model")
eliminated_variables = set()
working_factors = {node: [factor.scope() for factor in self.factors[node]]
for node in self.factors}
# The set of cliques that should be in the induced graph
cliques = set()
for factors in working_factors.values():
for factor in factors:
cliques.add(tuple(factor))
# Removing all the factors containing the variables which are
# eliminated (as all the factors should be considered only once)
for var in elimination_order:
factors = [factor for factor in working_factors[var]
if not set(factor).intersection(eliminated_variables)]
phi = set(itertools.chain(*factors)).difference({var})
cliques.add(tuple(phi))
del working_factors[var]
for variable in phi:
working_factors[variable].append(list(phi))
eliminated_variables.add(var)
edges_comb = [itertools.combinations(c, 2)
for c in filter(lambda x: len(x) > 1, cliques)]
return nx.Graph(itertools.chain(*edges_comb))
def induced_width(self, elimination_order):
"""
Returns the width (integer) of the induced graph formed by running Variable Elimination on the network.
The width is the defined as the number of nodes in the largest clique in the graph minus 1.
Parameters
----------
elimination_order: list, array like
List of variables in the order in which they are to be eliminated.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import VariableElimination
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = VariableElimination(model)
>>> inference.induced_width(['C', 'D', 'A', 'B', 'E'])
3
"""
induced_graph = self.induced_graph(elimination_order)
return nx.graph_clique_number(induced_graph) - 1
class BeliefPropagation(Inference):
"""
Class for performing inference using Belief Propagation method.
Creates a Junction Tree or Clique Tree (JunctionTree class) for the input
probabilistic graphical model and performs calibration of the junction tree
so formed using belief propagation.
Parameters
----------
model: BayesianModel, MarkovModel, FactorGraph, JunctionTree
        model for which inference is to be performed
"""
def __init__(self, model):
super(BeliefPropagation, self).__init__(model)
if not isinstance(model, JunctionTree):
self.junction_tree = model.to_junction_tree()
else:
self.junction_tree = copy.deepcopy(model)
self.clique_beliefs = {}
self.sepset_beliefs = {}
def get_cliques(self):
"""
Returns cliques used for belief propagation.
"""
return self.junction_tree.nodes()
def get_clique_beliefs(self):
"""
Returns clique beliefs. Should be called after the clique tree (or
junction tree) is calibrated.
"""
return self.clique_beliefs
def get_sepset_beliefs(self):
"""
Returns sepset beliefs. Should be called after clique tree (or junction
tree) is calibrated.
"""
return self.sepset_beliefs
def _update_beliefs(self, sending_clique, recieving_clique, operation):
"""
        This is the belief-update method.
Parameters
----------
sending_clique: node (as the operation is on junction tree, node should be a tuple)
Node sending the message
recieving_clique: node (as the operation is on junction tree, node should be a tuple)
            Node receiving the message
operation: str ('marginalize' | 'maximize')
The operation to do for passing messages between nodes.
Takes belief of one clique and uses it to update the belief of the
neighboring ones.
"""
sepset = frozenset(sending_clique).intersection(frozenset(recieving_clique))
sepset_key = frozenset((sending_clique, recieving_clique))
# \sigma_{i \rightarrow j} = \sum_{C_i - S_{i, j}} \beta_i
# marginalize the clique over the sepset
sigma = getattr(self.clique_beliefs[sending_clique], operation)(list(frozenset(sending_clique) - sepset),
inplace=False)
# \beta_j = \beta_j * \frac{\sigma_{i \rightarrow j}}{\mu_{i, j}}
self.clique_beliefs[recieving_clique] *= (sigma / self.sepset_beliefs[sepset_key]
if self.sepset_beliefs[sepset_key] else sigma)
# \mu_{i, j} = \sigma_{i \rightarrow j}
self.sepset_beliefs[sepset_key] = sigma
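    # A small illustration of the update above (the cliques are hypothetical):
    # with C_i = {A, B}, C_j = {B, C} and sepset S_{i,j} = {B},
    #   sigma_{i->j}(B)  = sum_A beta_i(A, B)        (project C_i onto the sepset)
    #   beta_j(B, C)    *= sigma_{i->j}(B) / mu_{i,j}(B)
    #   mu_{i,j}(B)      = sigma_{i->j}(B)
    # so once sigma equals mu, repeated updates leave beta_j unchanged.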
def _is_converged(self, operation):
"""
Checks whether the calibration has converged or not. At convergence
the sepset belief would be precisely the sepset marginal.
Parameters
----------
operation: str ('marginalize' | 'maximize')
The operation to do for passing messages between nodes.
if operation == marginalize, it checks whether the junction tree is calibrated or not
            else if operation == maximize, it checks whether the junction tree is max calibrated or not
        Formally, at convergence or at calibration this condition would be satisfied for
.. math:: \sum_{C_i - S_{i, j}} \beta_i = \sum_{C_j - S_{i, j}} \beta_j = \mu_{i, j}
and at max calibration this condition would be satisfied
.. math:: \max_{C_i - S_{i, j}} \beta_i = \max_{C_j - S_{i, j}} \beta_j = \mu_{i, j}
"""
# If no clique belief, then the clique tree is not calibrated
if not self.clique_beliefs:
return False
for edge in self.junction_tree.edges():
sepset = frozenset(edge[0]).intersection(frozenset(edge[1]))
sepset_key = frozenset(edge)
if (edge[0] not in self.clique_beliefs or edge[1] not in self.clique_beliefs or
sepset_key not in self.sepset_beliefs):
return False
marginal_1 = getattr(self.clique_beliefs[edge[0]], operation)(list(frozenset(edge[0]) - sepset),
inplace=False)
marginal_2 = getattr(self.clique_beliefs[edge[1]], operation)(list(frozenset(edge[1]) - sepset),
inplace=False)
if marginal_1 != marginal_2 or marginal_1 != self.sepset_beliefs[sepset_key]:
return False
return True
def _calibrate_junction_tree(self, operation):
"""
Generalized calibration of junction tree or clique using belief propagation. This method can be used for both
calibrating as well as max-calibrating.
Uses Lauritzen-Spiegelhalter algorithm or belief-update message passing.
Parameters
----------
operation: str ('marginalize' | 'maximize')
The operation to do for passing messages between nodes.
Reference
---------
Algorithm 10.3 Calibration using belief propagation in clique tree
Probabilistic Graphical Models: Principles and Techniques
Daphne Koller and Nir Friedman.
"""
# Initialize clique beliefs as well as sepset beliefs
self.clique_beliefs = {clique: self.junction_tree.get_factors(clique)
for clique in self.junction_tree.nodes()}
self.sepset_beliefs = {frozenset(edge): None for edge in self.junction_tree.edges()}
for clique in self.junction_tree.nodes():
if not self._is_converged(operation=operation):
neighbors = self.junction_tree.neighbors(clique)
                # update the root's belief using the neighbor cliques' beliefs
# upward pass
for neighbor_clique in neighbors:
self._update_beliefs(neighbor_clique, clique, operation=operation)
bfs_edges = nx.algorithms.breadth_first_search.bfs_edges(self.junction_tree, clique)
# update the beliefs of all the nodes starting from the root to leaves using root's belief
# downward pass
for edge in bfs_edges:
self._update_beliefs(edge[0], edge[1], operation=operation)
else:
break
def calibrate(self):
"""
Calibration using belief propagation in junction tree or clique tree.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.inference import BeliefPropagation
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> sat_cpd = TabularCPD('SAT', 2,
... [[0.1, 0.2, 0.7],
... [0.9, 0.8, 0.3]],
... evidence=['intel'], evidence_card=[3])
>>> letter_cpd = TabularCPD('letter', 2,
... [[0.1, 0.4, 0.8],
... [0.9, 0.6, 0.2]],
... evidence=['grade'], evidence_card=[3])
>>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)
>>> bp = BeliefPropagation(G)
>>> bp.calibrate()
"""
self._calibrate_junction_tree(operation='marginalize')
def max_calibrate(self):
"""
Max-calibration of the junction tree using belief propagation.
Examples
--------
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.inference import BeliefPropagation
>>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
... ('intel', 'SAT'), ('grade', 'letter')])
>>> diff_cpd = TabularCPD('diff', 2, [[0.2], [0.8]])
>>> intel_cpd = TabularCPD('intel', 3, [[0.5], [0.3], [0.2]])
>>> grade_cpd = TabularCPD('grade', 3,
... [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
... [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],
... evidence=['diff', 'intel'],
... evidence_card=[2, 3])
>>> sat_cpd = TabularCPD('SAT', 2,
... [[0.1, 0.2, 0.7],
... [0.9, 0.8, 0.3]],
... evidence=['intel'], evidence_card=[3])
>>> letter_cpd = TabularCPD('letter', 2,
... [[0.1, 0.4, 0.8],
... [0.9, 0.6, 0.2]],
... evidence=['grade'], evidence_card=[3])
>>> G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)
>>> bp = BeliefPropagation(G)
>>> bp.max_calibrate()
"""
self._calibrate_junction_tree(operation='maximize')
def _query(self, variables, operation, evidence=None):
"""
This is a generalized query method that can be used for both query and map query.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
operation: str ('marginalize' | 'maximize')
The operation to do for passing messages between nodes.
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.inference import BeliefPropagation
>>> from pgmpy.models import BayesianModel
>>> import numpy as np
>>> import pandas as pd
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),
... columns=['A', 'B', 'C', 'D', 'E'])
>>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
>>> model.fit(values)
>>> inference = BeliefPropagation(model)
>>> phi_query = inference.query(['A', 'B'])
References
----------
Algorithm 10.4 Out-of-clique inference in clique tree
Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman.
"""
is_calibrated = self._is_converged(operation=operation)
# Calibrate the junction tree if not calibrated
if not is_calibrated:
self.calibrate()
if not isinstance(variables, (list, tuple, set)):
query_variables = [variables]
else:
query_variables = list(variables)
query_variables.extend(evidence.keys() if evidence else [])
# Find a tree T' such that query_variables are a subset of scope(T')
nodes_with_query_variables = set()
for var in query_variables:
nodes_with_query_variables.update(filter(lambda x: var in x, self.junction_tree.nodes()))
subtree_nodes = nodes_with_query_variables
# Conversion of set to tuple just for indexing
nodes_with_query_variables = tuple(nodes_with_query_variables)
        # As the junction tree is a tree, there is only one path between any two nodes in it,
        # so we can just take the path between any two of them, no matter what their order is
for i in range(len(nodes_with_query_variables) - 1):
subtree_nodes.update(nx.shortest_path(self.junction_tree, nodes_with_query_variables[i],
nodes_with_query_variables[i + 1]))
subtree_undirected_graph = self.junction_tree.subgraph(subtree_nodes)
# Converting subtree into a junction tree
if len(subtree_nodes) == 1:
subtree = JunctionTree()
subtree.add_node(subtree_nodes.pop())
else:
subtree = JunctionTree(subtree_undirected_graph.edges())
        # Select a node as the root node. The root node will have only one neighbor
if len(subtree.nodes()) == 1:
root_node = subtree.nodes()[0]
else:
root_node = tuple(filter(lambda x: len(subtree.neighbors(x)) == 1, subtree.nodes()))[0]
clique_potential_list = [self.clique_beliefs[root_node]]
# For other nodes in the subtree compute the clique potentials as follows
        # As all the nodes are tuples, a simple set(root_node) won't work, as it would update the set with
        # all the elements of the tuple; instead use set([root_node]), which includes only the tuple, not the
# internal elements within it.
parent_nodes = set([root_node])
nodes_traversed = set()
while parent_nodes:
parent_node = parent_nodes.pop()
for child_node in set(subtree.neighbors(parent_node)) - nodes_traversed:
clique_potential_list.append(self.clique_beliefs[child_node] /
self.sepset_beliefs[frozenset([parent_node, child_node])])
parent_nodes.update([child_node])
nodes_traversed.update([parent_node])
# Add factors to the corresponding junction tree
subtree.add_factors(*clique_potential_list)
# Sum product variable elimination on the subtree
variable_elimination = VariableElimination(subtree)
if operation == 'marginalize':
return variable_elimination.query(variables=variables, evidence=evidence)
elif operation == 'maximize':
return variable_elimination.map_query(variables=variables, evidence=evidence)
def query(self, variables, evidence=None):
"""
Query method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
"""
return self._query(variables=variables, operation='marginalize', evidence=evidence)
def map_query(self, variables=None, evidence=None):
"""
MAP Query method using belief propagation.
Parameters
----------
variables: list
list of variables for which you want to compute the probability
evidence: dict
a dict key, value pair as {var: state_of_var_observed}
None if no evidence
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.inference import BeliefPropagation
>>> bayesian_model = BayesianModel([('A', 'J'), ('R', 'J'), ('J', 'Q'),
... ('J', 'L'), ('G', 'L')])
>>> cpd_a = TabularCPD('A', 2, [[0.2], [0.8]])
>>> cpd_r = TabularCPD('R', 2, [[0.4], [0.6]])
>>> cpd_j = TabularCPD('J', 2,
... [[0.9, 0.6, 0.7, 0.1],
... [0.1, 0.4, 0.3, 0.9]],
... ['R', 'A'], [2, 2])
>>> cpd_q = TabularCPD('Q', 2,
... [[0.9, 0.2],
... [0.1, 0.8]],
... ['J'], [2])
>>> cpd_l = TabularCPD('L', 2,
... [[0.9, 0.45, 0.8, 0.1],
... [0.1, 0.55, 0.2, 0.9]],
... ['G', 'J'], [2, 2])
>>> cpd_g = TabularCPD('G', 2, [[0.6], [0.4]])
>>> belief_propagation = BeliefPropagation(bayesian_model)
>>> belief_propagation.map_query(variables=['J', 'Q'],
... evidence={'A': 0, 'R': 0, 'G': 0, 'L': 1})
"""
# If no variables are specified then run the MAP query for all the variables present in the model
if variables is None:
variables = set(self.variables)
return self._query(variables=variables, operation='maximize', evidence=evidence)
| mit |
DirtyUnicorns/android_external_chromium-org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
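# A minimal invocation sketch: any extra command-line arguments are forwarded
# verbatim to buildbot_chrome_nacl_stage.py after the --disable_glibc and
# --disable_tests flags built above, e.g.
#
#     python buildbot_nacl_integration.py --mode=dbg
#
# (--mode=dbg is hypothetical; the forwarded options are defined by
# buildbot_chrome_nacl_stage.py, not by this wrapper.)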
| bsd-3-clause |
btgorman/RISE-power-water-ss-1phase | data_power/supplemental/nodes RTS79 coords/verify_ieee_rts_79_coords.py | 1 | 1388 | import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
import math
rts_coords = pd.read_csv('ieee rts-79 coords.csv', header=1)
rts_branches = pd.read_csv('ieee rts-79 branches.csv', header=0)
rts_coords = rts_coords.values
rts_branches = rts_branches.values
dict_coords = {}
for row in rts_coords:
temp_dict = {int(row[0]): (float(row[1]), float(row[2]))}
dict_coords.update(temp_dict)
for branch in rts_branches:
from_node = int(branch[1])
to_node = int(branch[2])
distance = 0.1 * float(branch[3])
x1, y1 = dict_coords[from_node]
x2, y2 = dict_coords[to_node]
error_abs = distance - ((x1-x2)**2 + (y1-y2)**2)**0.5
if distance == 0.:
print('Branch {} has distance {} and ??? abs error of {}'.format(int(branch[0]), distance, error_abs))
elif math.fabs(error_abs / distance) > 0.001:
print('Branch {} has distance {} and HIGH error of {}%'.format(int(branch[0]), distance, error_abs*100./distance))
else:
pass
# print('Branch {} has distance {} and LOW error of {}%'.format(int(branch[0]), distance, error_abs*100./distance))
# G = nx.Graph()
# G.add_node(1)
# G.add_node(2)
# G.add_node(3)
# G.add_edge(1,2, length=200)
# G.add_edge(1,3, length=300)
# G.add_edge(2,3, length=1000)
# posit = pos=nx.circular_layout(G)
# nx.draw(G, pos=posit)
# nx.draw_networkx_labels(G, pos=posit)
# nx.draw_networkx_edge_labels(G, pos=posit)
# plt.show() | apache-2.0 |
DSLituiev/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
DreamLiMu/ML_Python | tools/Ch06/EXTRAS/notLinSeperable.py | 4 | 2270 | '''
Created on Oct 6, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
markers =[]
colors =[]
fr = open('testSet.txt')#this file was generated by 2normalGen.py
for line in fr.readlines():
lineSplit = line.strip().split('\t')
xPt = float(lineSplit[0])
yPt = float(lineSplit[1])
label = int(lineSplit[2])
if (label == 0):
xcord0.append(xPt)
ycord0.append(yPt)
else:
xcord1.append(xPt)
ycord1.append(yPt)
fr.close()
fig = plt.figure()
ax = fig.add_subplot(221)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if ((x > 0.5) and (y < 0.5)) or ((x < 0.5) and (y > 0.5)):
xcord0.append(x); ycord0.append(y)
else:
xcord1.append(x); ycord1.append(y)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('A')
ax = fig.add_subplot(222)
xcord0 = random.standard_normal(150); ycord0 = random.standard_normal(150)
xcord1 = random.standard_normal(150)+2.0; ycord1 = random.standard_normal(150)+2.0
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('B')
ax = fig.add_subplot(223)
xcord0 = []; ycord0 = []; xcord1 = []; ycord1 = []
for i in range(300):
[x,y] = random.uniform(0,1,2)
if (x > 0.5):
xcord0.append(x*cos(2.0*pi*y)); ycord0.append(x*sin(2.0*pi*y))
else:
xcord1.append(x*cos(2.0*pi*y)); ycord1.append(x*sin(2.0*pi*y))
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('C')
ax = fig.add_subplot(224)
xcord1 = zeros(150); ycord1 = zeros(150)
xcord0 = random.uniform(-3,3,350); ycord0 = random.uniform(-3,3,350);
xcord1[0:50] = 0.3*random.standard_normal(50)+2.0; ycord1[0:50] = 0.3*random.standard_normal(50)+2.0
xcord1[50:100] = 0.3*random.standard_normal(50)-2.0; ycord1[50:100] = 0.3*random.standard_normal(50)-3.0
xcord1[100:150] = 0.3*random.standard_normal(50)+1.0; ycord1[100:150] = 0.3*random.standard_normal(50)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('D')
plt.show() | gpl-2.0 |
bospetersen/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_benignKmeans.py | 1 | 1141 | import sys
sys.path.insert(1, "../../../")
import h2o, tests
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def benignKmeans(ip,port):
# Connect to a pre-existing cluster
# connect to localhost:54321
# Log.info("Importing benign.csv data...\n")
benign_h2o = h2o.import_file(path=h2o.locate("smalldata/logreg/benign.csv"))
#benign_h2o.summary()
benign_sci = np.genfromtxt(h2o.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
benign_sci = imp.fit_transform(benign_sci)
# Log.info(paste("H2O K-Means with ", i, " clusters:\n", sep = ""))
for i in range(1,7):
benign_h2o_km = h2o.kmeans(x=benign_h2o, k=i)
print "H2O centers"
print benign_h2o_km.centers()
benign_sci_km = KMeans(n_clusters=i, init='k-means++', n_init=1)
benign_sci_km.fit(benign_sci)
print "sckit centers"
print benign_sci_km.cluster_centers_
if __name__ == "__main__":
tests.run_test(sys.argv, benignKmeans)
| apache-2.0 |
matthieuo/dl-classification | eval.py | 1 | 3611 | # dl-classification
# Copyright (C) 2017 Matthieu Ospici
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import tensorflow as tf
from sklearn import metrics
import models
import utils
from load_images import create_batch_from_files
def test_model(test_path,
num_classes,
log_path,
ftl):
with tf.device('/cpu:0'):
x, label_batch, rfb = create_batch_from_files(
test_path, [200, 200], 3, 100, ftl, False)
pred_label = models.foodv_test(
x,
num_classes,
reg_val=0.0,
is_train=False,
dropout_p=1.0)
    assert num_classes == ftl.curr_class, "Number of classes found in the datasets is not equal to the number of classes given"
prediction_label = tf.argmax(pred_label, 1)
correct_prediction = tf.equal(
tf.cast(
tf.argmax(
pred_label,
1),
tf.int32),
label_batch)
accuracy_class = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
# create queues to load images
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
last_chk = tf.train.latest_checkpoint(log_path)
chk_step = last_chk.split("-")[-1]
print(chk_step)
saver.restore(sess, last_chk)
med_ac = 0
for step in range(25):
lb, pb, rf, ac = sess.run(
[label_batch, prediction_label, rfb, accuracy_class])
# print(rf)
print(lb)
print(pb)
med_ac += ac
print("Acc : ", ac)
print("Med : ", med_ac / (step + 1))
print(metrics.classification_report(lb, pb))
coord.request_stop()
coord.join(threads)
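# A minimal sketch of driving test_model directly instead of through the CLI
# below (the paths and class count are hypothetical):
#
#     ftl = utils.file_to_label_binary()
#     test_model(['/data/test_set'], num_classes=10,
#                log_path='/tmp/train_logs', ftl=ftl)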
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group_genera = parser.add_argument_group('General options')
group_genera.add_argument(
"-paths",
"--data-paths",
help="Path to the test set",
required=True)
group_genera.add_argument(
"-ti",
"--test_identification",
help="String to identify test for tensorboard",
required=True)
group_genera.add_argument(
"-lp",
"--log_path",
help="Log directory path",
required=True)
group_genera.add_argument(
"-nc",
"--num-classes",
help="Number of classes on the training set",
type=int,
required=True)
args = parser.parse_args()
data_path = args.data_paths.split(';')
print("++++ data path : ", data_path)
print("++++ log path : ", args.log_path)
ftl = utils.file_to_label_binary()
test_model(data_path,
args.num_classes,
args.log_path,
ftl)
| gpl-3.0 |
metpy/MetPy | examples/XArray_Projections.py | 7 | 1146 | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
XArray Projection Handling
==========================
Use MetPy's XArray accessors to simplify opening a data file and plotting
data on a map using CartoPy.
"""
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import xarray as xr
# Any import of metpy will activate the accessors
from metpy.testing import get_test_data
ds = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False))
data_var = ds.metpy.parse_cf('Temperature')
x = data_var.x
y = data_var.y
im_data = data_var.isel(time=0).sel(isobaric=1000.)
fig = plt.figure(figsize=(14, 14))
ax = fig.add_subplot(1, 1, 1, projection=data_var.metpy.cartopy_crs)
ax.imshow(im_data, extent=(x.min(), x.max(), y.min(), y.max()),
cmap='RdBu', origin='lower' if y[0] < y[-1] else 'upper')
ax.coastlines(color='tab:green', resolution='10m')
ax.add_feature(cfeature.LAKES.with_scale('10m'), facecolor='none', edgecolor='tab:blue')
ax.add_feature(cfeature.RIVERS.with_scale('10m'), edgecolor='tab:blue')
plt.show()
| bsd-3-clause |
paris-saclay-cds/ramp-workflow | rampwf/utils/scoring.py | 1 | 5037 | # coding: utf-8
"""
Scoring utilities
"""
import numpy as np
import pandas as pd
from .pretty_print import IS_COLOR_TERM
from .pretty_print import print_warning
def reorder_df_scores(df_scores, score_types):
"""Reorder scores according to the order in score_types.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
score_types : list of score types
Returns
-------
df_scores : the dataframe with reordered scores
"""
try:
# try to re-order columns/rows in the printed array
# we may not have all train, valid, test, so need to select
index_order = np.array(['train', 'valid', 'test'])
ordered_index = index_order[np.isin(index_order, df_scores.index)]
df_scores = df_scores.loc[
ordered_index, [score_type.name for score_type in score_types]]
except Exception:
print_warning("Couldn't re-order the score matrix..")
return df_scores
def mean_score_matrix(df_scores_list, score_types):
u"""Construct a mean ± std score dataframe from a list of score dataframes.
Parameters
----------
df_scores_list : list of pd.DataFrame
a list of score data frames to average
score_types : list of score types
a list of score types to use
Returns
-------
df_scores : the mean ± std score dataframe
"""
scores = np.array([df_scores.values for df_scores in df_scores_list])
meanss = scores.mean(axis=0)
stdss = scores.std(axis=0)
precisions = [st.precision for st in score_types]
precisions.append(1) # for time
# we use unicode no break space so split in print_df_scores works
if IS_COLOR_TERM:
strs = np.array([[
u'{val}\u00A0±\u00A0{std}'.format(
val=round(mean, prec),
std=round(std, prec + 1))
for mean, std, prec in zip(means, stds, precisions)]
for means, stds in zip(meanss, stdss)])
else:
strs = np.array([[
u'{val} +- {std}'.format(
val=round(mean, prec),
std=round(std, prec + 1))
for mean, std, prec in zip(means, stds, precisions)]
for means, stds in zip(meanss, stdss)])
df_scores = pd.DataFrame(
strs, columns=df_scores_list[0].columns, index=df_scores_list[0].index)
return df_scores
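# A minimal usage sketch (the two score values are invented and `score_types`
# is assumed to hold two score type objects):
#
#     df_a = score_matrix_from_scores(score_types, ['valid'], [[0.80, 1.00]])
#     df_b = score_matrix_from_scores(score_types, ['valid'], [[0.90, 1.00]])
#     mean_score_matrix([df_a, df_b], score_types)
#
# which reports roughly "0.85 ± 0.05" for the first score and "1.0 ± 0.0" for
# the second, rounded to each score type's precision.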
def score_matrix_from_scores(score_types, steps, scoress):
"""Construct a score dataframe from a matrix of scores.
Parameters
----------
score_types : list of score types
a list of score types to use, score_type.name serves as column index
steps : a list of strings
subset of ['train', 'valid', 'test'], serves as row index
Returns
-------
df_scores : the score dataframe
"""
results = []
for step, scores in zip(steps, scoress):
for score_type, score in zip(score_types, scores):
results.append(
{'step': str(step), 'score': score_type.name, 'value': score})
df_scores = pd.DataFrame(results)
df_scores = df_scores.set_index(['step', 'score'])['value']
df_scores = df_scores.unstack()
df_scores = reorder_df_scores(df_scores, score_types)
return df_scores
def score_matrix(score_types, ground_truth, predictions):
"""Construct a score dataframe by scoring predictions against ground truth.
Parameters
----------
score_types : list of score types
a list of score types to use, score_type.name serves as column index
ground_truth : dict of Predictions
the ground truth data
predictions : dict of Predictions
the predicted data
Returns
-------
df_scores : pd.DataFrame
table of scores (rows = train/valid/test steps, columns = scores)
"""
if set(ground_truth.keys()) != set(predictions.keys()):
raise ValueError(('Predictions and ground truth steps '
'do not match:\n'
' * predictions = {} \n'
' * ground_truth = {} ')
.format(set(predictions.keys()),
set(ground_truth.keys())))
steps = ground_truth.keys()
scoress = [[
score_type.score_function(ground_truth[step], predictions[step])
for score_type in score_types] for step in ground_truth]
return score_matrix_from_scores(score_types, steps, scoress)
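# A minimal usage sketch (the names are hypothetical): given ground truth and
# predictions keyed by the same steps,
#
#     ground_truth = {'train': gt_train, 'valid': gt_valid}
#     predictions = {'train': pred_train, 'valid': pred_valid}
#     df = score_matrix(score_types, ground_truth, predictions)
#
# yields one row per step and one column per score type.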
def round_df_scores(df_scores, score_types):
"""Round scores to the precision set in the score type.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
score_types : list of score types
Returns
-------
df_scores : the dataframe with rounded scores
"""
df_scores_copy = df_scores.copy()
for column, score_type in zip(df_scores_copy, score_types):
df_scores_copy[column] = [round(score, score_type.precision)
for score in df_scores_copy[column]]
return df_scores_copy
| bsd-3-clause |
Event38/MissionPlanner | Lib/site-packages/numpy/core/code_generators/ufunc_docstrings.py | 57 | 85797 | # Docstrings for generated ufuncs
docdict = {}
def get(name):
return docdict.get(name)
def add_newdoc(place, name, doc):
docdict['.'.join((place, name))] = doc
add_newdoc('numpy.core.umath', 'absolute',
"""
Calculate the absolute value element-wise.
Parameters
----------
x : array_like
Input array.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. For complex input, ``a + ib``, the
absolute value is :math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
>>> np.absolute(1.2 + 1j)
1.5620499351813308
Plot the function over ``[-10, 10]``:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10, 101)
>>> plt.plot(x, np.absolute(x))
>>> plt.show()
Plot the function over the complex plane:
>>> xx = x + 1j * x[:, np.newaxis]
>>> plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10])
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'add',
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be added. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
y : ndarray or scalar
The sum of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` + `x2` in terms of array broadcasting.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
""")
add_newdoc('numpy.core.umath', 'arccos',
"""
Trigonometric inverse cosine, element-wise.
The inverse of `cos` so that, if ``y = cos(x)``, then ``x = arccos(y)``.
Parameters
----------
x : array_like
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
out : ndarray, optional
Array of the same shape as `a`, to store results in. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The angle of the ray intersecting the unit circle at the given
`x`-coordinate in radians [0, pi]. If `x` is a scalar then a
scalar is returned, otherwise an array of the same shape as `x`
is returned.
See Also
--------
cos, arctan, arcsin, emath.arccos
Notes
-----
`arccos` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cos(z) = x`. The convention is to return
the angle `z` whose real part lies in `[0, pi]`.
For real-valued input data types, `arccos` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytic function that
has branch cuts `[-inf, -1]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse `cos` is also known as `acos` or cos^-1.
References
----------
M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 79. http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arccos of 1 to be 0, and of -1 to be pi:
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
Plot arccos:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-1, 1, num=100)
>>> plt.plot(x, np.arccos(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arccosh',
"""
Inverse hyperbolic cosine, elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array of the same shape as `x`, to store results in.
See `doc.ufuncs` (Section "Output arguments") for details.
Returns
-------
y : ndarray
Array of the same shape as `x`.
See Also
--------
cosh, arcsinh, sinh, arctanh, tanh
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi, pi]` and the real part in
``[0, inf]``.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccosh` is a complex analytical function that
has a branch cut `[-inf, 1]` and is continuous from above on it.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arccosh
Examples
--------
>>> np.arccosh([np.e, 10.0])
array([ 1.65745445, 2.99322285])
>>> np.arccosh(1)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsin',
"""
Inverse sine, element-wise.
Parameters
----------
x : array_like
`y`-coordinate on the unit circle.
out : ndarray, optional
Array of the same shape as `x`, in which to store the results.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
angle : ndarray
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``. If `x` is a scalar, a scalar
is returned, otherwise an array.
See Also
--------
sin, cos, arccos, tan, arctan, arctan2, emath.arcsin
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arcsin` is a complex analytic function that
has, by convention, the branch cuts [-inf, -1] and [1, inf] and is
continuous from above on the former and from below on the latter.
The inverse sine is also known as `asin` or sin^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
""")
add_newdoc('numpy.core.umath', 'arcsinh',
"""
Inverse hyperbolic sine elementwise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : ndarray
        Array of the same shape as `x`.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
returns ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arccos` is a complex analytical function that
has branch cuts `[1j, infj]` and `[-1j, -infj]` and is continuous from
the right on the former and from the left on the latter.
The inverse hyperbolic sine is also known as `asinh` or ``sinh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arcsinh
Examples
--------
>>> np.arcsinh(np.array([np.e, 10.0]))
array([ 1.72538256, 2.99822295])
""")
add_newdoc('numpy.core.umath', 'arctan',
"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : array_like
Input values. `arctan` is applied to each element of `x`.
Returns
-------
out : ndarray
Out has the same shape as `x`. Its real part is in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
It is a scalar if `x` is a scalar.
See Also
--------
arctan2 : The "four quadrant" arctan of the angle formed by (`x`, `y`)
and the positive `x`-axis.
angle : Argument of complex values.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctan` is a complex analytic function that
has [`1j, infj`] and [`-1j, -infj`] as branch cuts, and is continuous
from the left on the former and from the right on the latter.
The inverse tangent is also known as `atan` or tan^{-1}.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79.
http://www.math.sfu.ca/~cbm/aands/
Examples
--------
We expect the arctan of 0 to be 0, and of 1 to be pi/4:
>>> np.arctan([0, 1])
array([ 0. , 0.78539816])
>>> np.pi/4
0.78539816339744828
Plot arctan:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10, 10)
>>> plt.plot(x, np.arctan(x))
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'arctan2',
"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
Returns
-------
angle : ndarray
Array of angles in radians, in the range ``[-pi, pi]``.
See Also
--------
arctan, tan, angle
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> np.arctan2([1., -1.], [0., 0.])
array([ 1.57079633, -1.57079633])
>>> np.arctan2([0., 0., np.inf], [+0., -0., np.inf])
array([ 0. , 3.14159265, 0.78539816])
""")
add_newdoc('numpy.core.umath', '_arg',
"""
DO NOT USE, ONLY FOR TESTING
""")
add_newdoc('numpy.core.umath', 'arctanh',
"""
Inverse hyperbolic tangent elementwise.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Array of the same shape as `x`.
See Also
--------
emath.arctanh
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`. The convention is to return the
`z` whose imaginary part lies in `[-pi/2, pi/2]`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `arctanh` is a complex analytical function that
has branch cuts `[-1, -inf]` and `[1, inf]` and is continuous from
above on the former and from below on the latter.
The inverse hyperbolic tangent is also known as `atanh` or ``tanh^-1``.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
http://en.wikipedia.org/wiki/Arctanh
Examples
--------
>>> np.arctanh([0, -0.5])
array([ 0. , -0.54930614])
""")
add_newdoc('numpy.core.umath', 'bitwise_and',
"""
Compute the bit-wise AND of two arrays element-wise.
Computes the bit-wise AND of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``&``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_and
bitwise_or
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise AND of 13 and 17 is
therefore ``00001101 & 00010001 = 00000001``, or 1:
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.binary_repr(12)
'1100'
>>> np.bitwise_and([14,3], 13)
array([12, 1])
>>> np.bitwise_and([11,7], [4,25])
array([0, 1])
>>> np.bitwise_and(np.array([2,5,255]), np.array([3,14,16]))
array([ 2, 4, 16])
>>> np.bitwise_and([True, True], [False, True])
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_or',
"""
Compute the bit-wise OR of two arrays element-wise.
Computes the bit-wise OR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``|``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
Result.
See Also
--------
logical_or
bitwise_and
bitwise_xor
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 has the binary representation ``00001101``. Likewise,
16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is
then ``00011101``, or 29:
>>> np.bitwise_or(13, 16)
29
>>> np.binary_repr(29)
'11101'
>>> np.bitwise_or(32, 2)
34
>>> np.bitwise_or([33, 4], 1)
array([33, 5])
>>> np.bitwise_or([33, 4], [1, 2])
array([33, 6])
>>> np.bitwise_or(np.array([2, 5, 255]), np.array([4, 4, 4]))
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
>>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'bitwise_xor',
"""
Compute the bit-wise XOR of two arrays element-wise.
Computes the bit-wise XOR of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``^``.
Parameters
----------
x1, x2 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
logical_xor
bitwise_and
bitwise_or
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
The number 13 is represented by ``00001101``. Likewise, 17 is
represented by ``00010001``. The bit-wise XOR of 13 and 17 is
therefore ``00011100``, or 28:
>>> np.bitwise_xor(13, 17)
28
>>> np.binary_repr(28)
'11100'
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor([31,3], 5)
array([26, 6])
>>> np.bitwise_xor([31,3], [5,6])
array([26, 5])
>>> np.bitwise_xor([True, True], [False, True])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ceil',
"""
Return the ceiling of the input, element-wise.
The ceil of the scalar `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\\lceil x \\rceil`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The ceiling of each element in `x`, with `float` dtype.
See Also
--------
floor, trunc, rint
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'trunc',
"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The truncated value of each element in `x`.
See Also
--------
ceil, floor, rint
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'conjugate',
"""
Return the complex conjugate, element-wise.
The complex conjugate of a complex number is obtained by changing the
sign of its imaginary part.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The complex conjugate of `x`, with the same dtype as `x`.
Examples
--------
>>> np.conjugate(1+2j)
(1-2j)
>>> x = np.eye(2) + 1j * np.eye(2)
>>> np.conjugate(x)
array([[ 1.-1.j, 0.-0.j],
[ 0.-0.j, 1.-1.j]])
""")
add_newdoc('numpy.core.umath', 'cos',
"""
Cosine elementwise.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding cosine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'cosh',
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray
Output array of same shape as `x`.
Examples
--------
>>> np.cosh(0)
1.0
The hyperbolic cosine describes the shape of a hanging cable:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 1000)
>>> plt.plot(x, np.cosh(x))
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'degrees',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Input array in radians.
out : ndarray, optional
Output array of same shape as x.
Returns
-------
y : ndarray of floats
The corresponding degree values; if `out` was supplied this is a
reference to it.
See Also
--------
rad2deg : equivalent function
Examples
--------
Convert a radian array to degrees
>>> rad = np.arange(12.)*np.pi/6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240.,
270., 300., 330.])
>>> out = np.zeros((rad.shape))
>>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
""")
add_newdoc('numpy.core.umath', 'rad2deg',
"""
Convert angles from radians to degrees.
Parameters
----------
x : array_like
Angle in radians.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The corresponding angle in degrees.
See Also
--------
deg2rad : Convert angles from degrees to radians.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
rad2deg(x) is ``180 * x / pi``.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
""")
add_newdoc('numpy.core.umath', 'divide',
"""
Divide arguments element-wise.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The quotient `x1/x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
See Also
--------
seterr : Set whether to raise or warn on overflow, underflow and division
by zero.
Notes
-----
Equivalent to `x1` / `x2` in terms of array-broadcasting.
Behavior on division by zero can be changed using `seterr`.
When both `x1` and `x2` are of an integer type, `divide` will return
integers and throw away the fractional part. Moreover, division by zero
always yields zero in integer arithmetic.
Examples
--------
>>> np.divide(2.0, 4.0)
0.5
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.divide(x1, x2)
array([[ NaN, 1. , 1. ],
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
Note the behavior with integer types:
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
Division by zero always yields zero in integer arithmetic, and does not
raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
Division by zero can, however, be caught using `seterr`:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: divide by zero encountered in divide
>>> ignored_states = np.seterr(**old_err_state)
>>> np.divide(1, 0)
0
""")
add_newdoc('numpy.core.umath', 'equal',
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays of the same shape.
Returns
-------
out : {ndarray, bool}
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal([0, 1, 3], np.arange(3))
array([ True, True, False], dtype=bool)
What is compared are values, not types. So an int (1) and an array of
length one can evaluate as True:
>>> np.equal(1, np.ones(1))
array([ True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'exp',
"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Output array, element-wise exponential of `x`.
See Also
--------
expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
The irrational number ``e`` is also known as Euler's number. It is
approximately 2.718281, and is the base of the natural logarithm,
``ln`` (this means that, if :math:`x = \\ln y = \\log_e y`,
then :math:`e^x = y`). For real input, ``exp(x)`` is always positive.
For complex arguments, ``x = a + ib``, we can write
:math:`e^x = e^a e^{ib}`. The first term, :math:`e^a`, is already
known (it is the real argument, described above). The second term,
:math:`e^{ib}`, is :math:`\\cos b + i \\sin b`, a function with magnitude
1 and a periodic phase.
References
----------
.. [1] Wikipedia, "Exponential function",
http://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
Examples
--------
Plot the magnitude and phase of ``exp(x)`` in the complex plane:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2*np.pi, 2*np.pi, 100)
>>> xx = x + 1j * x[:, np.newaxis] # a + ib over complex plane
>>> out = np.exp(xx)
>>> plt.subplot(121)
>>> plt.imshow(np.abs(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Magnitude of exp(x)')
>>> plt.subplot(122)
>>> plt.imshow(np.angle(out),
... extent=[-2*np.pi, 2*np.pi, -2*np.pi, 2*np.pi])
>>> plt.title('Phase (angle) of exp(x)')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'exp2',
"""
Calculate `2**p` for all `p` in the input array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array to insert results into.
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
See Also
--------
exp : Calculate the exponential of all elements in the input array.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> np.exp2([2, 3])
array([ 4., 8.])
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
x : array_like
Input values.
Returns
-------
out : ndarray
Element-wise exponential minus one: ``out = exp(x) - 1``.
See Also
--------
log1p : ``log(1 + x)``, the inverse of expm1.
Notes
-----
This function provides greater precision than the formula ``exp(x) - 1``
for small values of ``x``.
Examples
--------
The true value of ``exp(1e-10) - 1`` is ``1.00000000005e-10`` to
about 32 significant digits. This example shows the superiority of
expm1 in this case.
>>> np.expm1(1e-10)
1.00000000005e-10
>>> np.exp(1e-10) - 1
1.000000082740371e-10
""")
add_newdoc('numpy.core.umath', 'fabs',
"""
Compute the absolute values elementwise.
This function returns the absolute values (positive magnitude) of the data
in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : array_like
The array of numbers for which the absolute values are required. If
`x` is a scalar, the result `y` will also be a scalar.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : {ndarray, scalar}
The absolute values of `x`, the returned values are always floats.
See Also
--------
absolute : Absolute values including `complex` types.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs([-1.2, 1.2])
array([ 1.2, 1.2])
""")
add_newdoc('numpy.core.umath', 'floor',
"""
Return the floor of the input, element-wise.
The floor of the scalar `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\\lfloor x \\rfloor`.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {ndarray, scalar}
The floor of each element in `x`.
See Also
--------
ceil, trunc, rint
Notes
-----
Some spreadsheet programs calculate the "floor-towards-zero", in other
words ``floor(-2.5) == -2``. NumPy instead uses the definition of
`floor` where ``floor(-2.5) == -3``.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
""")
add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
Parameters
----------
x1 : array_like
Numerator.
x2 : array_like
Denominator.
Returns
-------
y : ndarray
y = floor(`x1`/`x2`)
See Also
--------
divide : Standard division.
floor : Round a number to the nearest integer toward minus infinity.
ceil : Round a number to the nearest integer toward infinity.
Examples
--------
>>> np.floor_divide(7,3)
2
>>> np.floor_divide([1., 2., 3., 4.], 2.5)
array([ 0., 0., 1., 1.])
""")
add_newdoc('numpy.core.umath', 'fmod',
"""
Return the element-wise remainder of division.
This is the NumPy implementation of the C library function ``fmod``; it
should not be confused with the Python modulus operator ``x1 % x2``
(see `remainder`).
Parameters
----------
x1 : array_like
Dividend.
x2 : array_like
Divisor.
Returns
-------
y : array_like
The remainder of the division of `x1` by `x2`.
See Also
--------
remainder : Modulo operation where the quotient is `floor(x1/x2)`.
divide
Notes
-----
The result of the modulo operation for negative dividend and divisors is
bound by conventions. In `fmod`, the sign of the remainder is the sign of
the dividend. In `remainder`, the sign of the divisor does not affect the
sign of the result.
Examples
--------
>>> np.fmod([-3, -2, -1, 1, 2, 3], 2)
array([-1, 0, -1, 1, 0, 1])
>>> np.remainder([-3, -2, -1, 1, 2, 3], 2)
array([1, 0, 1, 1, 0, 1])
>>> np.fmod([5, 3], [2, 2.])
array([ 1., 1.])
>>> a = np.arange(-3, 3).reshape(3, 2)
>>> a
array([[-3, -2],
[-1, 0],
[ 1, 2]])
>>> np.fmod(a, [2,2])
array([[-1, 0],
[-1, 0],
[ 1, 0]])
""")
add_newdoc('numpy.core.umath', 'greater',
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater_equal, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater([4,2],[2,2])
array([ True, False], dtype=bool)
If the inputs are ndarrays, then np.greater is equivalent to '>'.
>>> a = np.array([4,2])
>>> b = np.array([2,2])
>>> a > b
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'greater_equal',
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, less_equal, equal, not_equal
Examples
--------
>>> np.greater_equal([4, 2, 1], [2, 2, 2])
array([ True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'hypot',
"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
(See Examples)
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
""")
add_newdoc('numpy.core.umath', 'invert',
"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
For signed integer inputs, the two's complement is returned.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
x1 : array_like
Only integer types are handled (including booleans).
Returns
-------
out : array_like
Result.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
References
----------
.. [1] Wikipedia, "Two's complement",
http://en.wikipedia.org/wiki/Two's_complement
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> np.invert(np.array([13], dtype=np.uint8))
array([242], dtype=uint8)
>>> np.binary_repr(13, width=8)
'00001101'
>>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
>>> np.invert(np.array([13], dtype=np.uint16))
array([65522], dtype=uint16)
>>> np.binary_repr(13, width=16)
'0000000000001101'
>>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
>>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
>>> np.invert(np.array([True, False]))
array([False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'isfinite',
"""
Test element-wise for finite-ness (not infinity or not Not a Number).
The result is returned as a boolean array.
Parameters
----------
x : array_like
Input values.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
y : ndarray, bool
For scalar input, the result is a new boolean with value True
if the input is finite; otherwise the value is False (input is
either positive infinity, negative infinity or Not a Number).
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is finite; otherwise the values are False (element
is either positive infinity, negative infinity or Not a Number).
See Also
--------
isinf, isneginf, isposinf, isnan
Notes
-----
Not a Number, positive infinity and negative infinity are considered
to be non-finite.
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity,
and that positive infinity is not equivalent to negative infinity; plain
"infinity" here always refers to positive infinity.
Errors result if the second argument is also supplied when `x` is a scalar
input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(np.NINF)
False
>>> np.isfinite([np.log(-1.),1.,np.log(0)])
array([False, True, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isfinite(x, y)
array([0, 1, 0])
>>> y
array([0, 1, 0])
""")
add_newdoc('numpy.core.umath', 'isinf',
"""
Test element-wise for positive or negative infinity.
Return a bool-type array, the same shape as `x`, True where ``x ==
+/-inf``, False everywhere else.
Parameters
----------
x : array_like
Input values
out : array_like, optional
An array with the same shape as `x` to store the result.
Returns
-------
y : bool (scalar) or bool-type ndarray
For scalar input, the result is a new boolean with value True
if the input is positive or negative infinity; otherwise the value
is False.
For array input, the result is a boolean array with the same
shape as the input and the values are True where the
corresponding element of the input is positive or negative
infinity; elsewhere the values are False. If a second argument
was supplied the result is stored there. If the type of that array
is a numeric type the result is represented as zeros and ones, if
the type is boolean then as False and True, respectively.
The return value `y` is then a reference to that array.
See Also
--------
isneginf, isposinf, isnan, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
Errors result if the second argument is supplied when the first
argument is a scalar, or if the first and second arguments have
different shapes.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.NINF)
True
>>> np.isinf([np.inf, -np.inf, 1.0, np.nan])
array([ True, True, False, False], dtype=bool)
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([2, 2, 2])
>>> np.isinf(x, y)
array([1, 0, 1])
>>> y
array([1, 0, 1])
""")
add_newdoc('numpy.core.umath', 'isnan',
"""
Test element-wise for Not a Number (NaN), return result as a bool array.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : {ndarray, bool}
For scalar input, the result is a new boolean with value True
if the input is NaN; otherwise the value is False.
For array input, the result is a boolean array with the same
dimensions as the input and the values are True if the corresponding
element of the input is NaN; otherwise the values are False.
See Also
--------
isinf, isneginf, isposinf, isfinite
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan([np.log(-1.),1.,np.log(0)])
array([ True, False, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'left_shift',
"""
Shift the bits of an integer to the left.
Bits are shifted to the left by appending `x2` 0s at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to multiplying `x1` by ``2**x2``.
Parameters
----------
x1 : array_like of integer type
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
Returns
-------
out : array of integer type
Return `x1` with bits shifted `x2` times to the left.
See Also
--------
right_shift : Shift the bits of an integer to the right.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(5)
'101'
>>> np.left_shift(5, 2)
20
>>> np.binary_repr(20)
'10100'
>>> np.left_shift(5, [1,2,3])
array([10, 20, 40])
""")
add_newdoc('numpy.core.umath', 'less',
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less_equal, greater_equal, equal, not_equal
Examples
--------
>>> np.less([1, 2], [2, 2])
array([ True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'less_equal',
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays. If ``x1.shape != x2.shape``, they must be
broadcastable to a common shape (which may be the shape of one or
the other).
Returns
-------
out : bool or ndarray of bool
Array of bools, or a single bool if `x1` and `x2` are scalars.
See Also
--------
greater, less, greater_equal, equal, not_equal
Examples
--------
>>> np.less_equal([4, 2, 1], [2, 2, 2])
array([False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'log',
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`.
Parameters
----------
x : array_like
Input value.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
See Also
--------
log10, log2, log1p, emath.log
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log([1, np.e, np.e**2, 0])
array([ 0., 1., 2., -Inf])
""")
add_newdoc('numpy.core.umath', 'log10',
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative.
See Also
--------
emath.log10
Notes
-----
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `10**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log10` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log10` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log10`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
add_newdoc('numpy.core.umath', 'log2',
"""
Base-2 logarithm of `x`.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Base-2 logarithm of `x`.
See Also
--------
log, log10, log1p, emath.log2
Notes
-----
.. versionadded:: 1.3.0
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `2**z = x`. The convention is to return the `z`
whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log2` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log2` is a complex analytical function that
has a branch cut `[-inf, 0]` and is continuous from above on it. `log2`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-Inf, 0., 1., 4.])
>>> xi = np.array([0+1.j, 1, 2+0.j, 4.j])
>>> np.log2(xi)
array([ 0.+2.26618007j, 0.+0.j , 1.+0.j , 2.+2.26618007j])
""")
add_newdoc('numpy.core.umath', 'logaddexp',
"""
Logarithm of the sum of exponentiations of the inputs.
Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
statistics where the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the logarithm of the calculated probability is stored. This function
allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
Returns
-------
result : ndarray
Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log(1e-50)
>>> prob2 = np.log(2.5e-50)
>>> prob12 = np.logaddexp(prob1, prob2)
>>> prob12
-113.87649168120691
>>> np.exp(prob12)
3.5000000000000057e-50
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
Logarithm of the sum of exponentiations of the inputs in base-2.
Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
learning when the calculated probabilities of events may be so small
as to exceed the range of normal floating point numbers. In such cases
the base-2 logarithm of the calculated probability can be used instead.
This function allows adding probabilities stored in such a fashion.
Parameters
----------
x1, x2 : array_like
Input values.
out : ndarray, optional
Array to store results in.
Returns
-------
result : ndarray
Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
Examples
--------
>>> prob1 = np.log2(1e-50)
>>> prob2 = np.log2(2.5e-50)
>>> prob12 = np.logaddexp2(prob1, prob2)
>>> prob1, prob2, prob12
(-166.09640474436813, -164.77447664948076, -164.28904982231052)
>>> 2**prob12
3.4999999999999914e-50
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
Natural logarithm of `1 + x`, element-wise.
See Also
--------
expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output. For
each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, `log1p` is a complex analytical function that
has a branch cut `[-inf, -1]` and is continuous from above on it. `log1p`
handles the floating-point negative zero as an infinitesimal negative
number, conforming to the C99 standard.
References
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> np.log(1 + 1e-99)
0.0
""")
add_newdoc('numpy.core.umath', 'logical_and',
"""
Compute the truth value of x1 AND x2 elementwise.
Parameters
----------
x1, x2 : array_like
Input arrays. `x1` and `x2` must be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
AND operation on corresponding elements of `x1` and `x2`.
See Also
--------
logical_or, logical_not, logical_xor
bitwise_and
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and([True, False], [False, False])
array([False, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_and(x>1, x<4)
array([False, False, True, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_not',
"""
Compute the truth value of NOT x elementwise.
Parameters
----------
x : array_like
Logical NOT is applied to the elements of `x`.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
See Also
--------
logical_and, logical_or, logical_xor
Examples
--------
>>> np.logical_not(3)
False
>>> np.logical_not([True, False, 0, 1])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_or',
"""
Compute the truth value of x1 OR x2 elementwise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
They have to be of the same shape.
Returns
-------
y : {ndarray, bool}
Boolean result with the same shape as `x1` and `x2` of the logical
OR operation on elements of `x1` and `x2`.
See Also
--------
logical_and, logical_not, logical_xor
bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or([True, False], [False, False])
array([ True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_or(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'logical_xor',
"""
Compute the truth value of x1 XOR x2, element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`. They must
be broadcastable to the same shape.
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by whether or not
broadcasting of one or both arrays was required.
See Also
--------
logical_and, logical_or, logical_not, bitwise_xor
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor([True, True, False, False], [True, False, True, False])
array([False, True, True, False], dtype=bool)
>>> x = np.arange(5)
>>> np.logical_xor(x < 1, x > 3)
array([ True, False, False, False, True], dtype=bool)
Simple example showing support of broadcasting
>>> np.logical_xor(0, np.eye(2))
array([[ True, False],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'maximum',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing
the element-wise maxima. If one of the elements being
compared is a nan, then that element is returned. If
both elements are nans then the first is returned. The
latter distinction is important for complex nans,
which are defined as at least one of the real or
imaginary parts being a nan. The net effect is that
nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
minimum :
element-wise minimum
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
Equivalent to ``np.where(x1 > x2, x1, x2)`` but faster and does proper
broadcasting.
Examples
--------
>>> np.maximum([2, 3, 4], [1, 5, 2])
array([2, 5, 4])
>>> np.maximum(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
array([ NaN, NaN, NaN])
>>> np.maximum(np.Inf, 1)
inf
""")
add_newdoc('numpy.core.umath', 'minimum',
"""
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then that element
is returned. If both elements are nans then the first is returned. The
latter distinction is important for complex nans, which are defined as at
least one of the real or imaginary parts being a nan. The net effect is
that nans are propagated.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape, or shapes that can be broadcast to a single shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
maximum :
element-wise maximum that propagates nans.
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
Notes
-----
The minimum is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.minimum([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.minimum(np.eye(2), [0.5, 2]) # broadcasting
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ NaN, NaN, NaN])
""")
add_newdoc('numpy.core.umath', 'fmax',
"""
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
maxima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmin :
element-wise minimum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmax is equivalent to ``np.where(x1 >= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmax([2, 3, 4], [1, 5, 2])
array([ 2., 5., 4.])
>>> np.fmax(np.eye(2), [0.5, 2])
array([[ 1. , 2. ],
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'fmin',
"""
fmin(x1, x2[, out])
Element-wise minimum of array elements.
Compare two arrays and returns a new array containing the element-wise
minima. If one of the elements being compared is a nan, then the non-nan
element is returned. If both elements are nans then the first is returned.
The latter distinction is important for complex nans, which are defined as
at least one of the real or imaginary parts being a nan. The net effect is
that nans are ignored when possible.
Parameters
----------
x1, x2 : array_like
The arrays holding the elements to be compared. They must have
the same shape.
Returns
-------
y : {ndarray, scalar}
The minimum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
--------
fmax :
element-wise maximum that ignores nans unless both inputs are nans.
maximum :
element-wise maximum that propagates nans.
minimum :
element-wise minimum that propagates nans.
Notes
-----
.. versionadded:: 1.3.0
The fmin is equivalent to ``np.where(x1 <= x2, x1, x2)`` when neither
x1 nor x2 are nans, but it is faster and does proper broadcasting.
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
array([[ 0.5, 0. ],
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
""")
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
Parameters
----------
x : array_like
Input array.
Returns
-------
y1 : ndarray
Fractional part of `x`.
y2 : ndarray
Integral part of `x`.
Notes
-----
For integer input the return values are floats.
Examples
--------
>>> np.modf([0, 3.5])
(array([ 0. , 0.5]), array([ 0., 3.]))
>>> np.modf(-0.5)
(-0.5, -0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays to be multiplied.
Returns
-------
y : ndarray
The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
""")
add_newdoc('numpy.core.umath', 'negative',
"""
Returns an array with the negative of each element of the original array.
Parameters
----------
x : array_like or scalar
Input array.
Returns
-------
y : ndarray or scalar
Returned array or scalar: `y = -x`.
Examples
--------
>>> np.negative([1.,-1.])
array([-1., 1.])
""")
add_newdoc('numpy.core.umath', 'not_equal',
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : array_like
Input arrays.
out : ndarray, optional
A placeholder the same shape as `x1` to store the result.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
not_equal : ndarray bool, scalar bool
For each element in `x1, x2`, return True if `x1` is not equal
to `x2` and False otherwise.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal([1.,2.], [1., 3.])
array([False, True], dtype=bool)
>>> np.not_equal([1, 2], [[1, 3],[1, 4]])
array([[False, True],
[False, True]], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'ones_like',
"""
Returns an array of ones with the same shape and type as a given array.
Equivalent to making a copy of `a` and filling it with ones.
Please refer to the documentation for `zeros_like` for further details.
See Also
--------
zeros_like, ones
Examples
--------
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ones_like(a)
array([[1, 1, 1],
[1, 1, 1]])
""")
add_newdoc('numpy.core.umath', 'power',
"""
First array elements raised to powers from second array, element-wise.
Raise each base in `x1` to the positionally-corresponding power in
`x2`. `x1` and `x2` must be broadcastable to the same shape.
Parameters
----------
x1 : array_like
The bases.
x2 : array_like
The exponents.
Returns
-------
y : ndarray
The bases in `x1` raised to the exponents in `x2`.
Examples
--------
Cube each element in a list.
>>> x1 = range(6)
>>> x1
[0, 1, 2, 3, 4, 5]
>>> np.power(x1, 3)
array([ 0, 1, 8, 27, 64, 125])
Raise the bases to different exponents.
>>> x2 = [1.0, 2.0, 3.0, 3.0, 2.0, 1.0]
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1, 2, 3, 3, 2, 1],
[1, 2, 3, 3, 2, 1]])
>>> np.power(x1, x2)
array([[ 0, 1, 8, 27, 16, 5],
[ 0, 1, 8, 27, 16, 5]])
""")
add_newdoc('numpy.core.umath', 'radians',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Input array in degrees.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding radian values.
See Also
--------
deg2rad : equivalent function
Examples
--------
Convert a degree array to radians
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([ 0. , 0.52359878, 1.04719755, 1.57079633, 2.0943951 ,
2.61799388, 3.14159265, 3.66519143, 4.1887902 , 4.71238898,
5.23598776, 5.75958653])
>>> out = np.zeros((deg.shape))
>>> ret = np.radians(deg, out)
>>> ret is out
True
""")
add_newdoc('numpy.core.umath', 'deg2rad',
"""
Convert angles from degrees to radians.
Parameters
----------
x : array_like
Angles in degrees.
Returns
-------
y : ndarray
The corresponding angle in radians.
See Also
--------
rad2deg : Convert angles from radians to degrees.
unwrap : Remove large jumps in angle by wrapping.
Notes
-----
.. versionadded:: 1.3.0
``deg2rad(x)`` is ``x * pi / 180``.
Examples
--------
>>> np.deg2rad(180)
3.1415926535897931
""")
add_newdoc('numpy.core.umath', 'reciprocal',
"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : array_like
Input array.
Returns
-------
y : ndarray
Return array.
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division.
For integer zero the result is an overflow.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
add_newdoc('numpy.core.umath', 'remainder',
"""
Return element-wise remainder of division.
Computes ``x1 - floor(x1 / x2) * x2``.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
y : ndarray
The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
divide, floor
Notes
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
>>> np.remainder([4, 7], [2, 3])
array([0, 1])
>>> np.remainder(np.arange(7), 5)
array([0, 1, 2, 3, 4, 0, 1])
""")
add_newdoc('numpy.core.umath', 'right_shift',
"""
Shift the bits of an integer to the right.
Bits are shifted to the right by removing `x2` bits at the right of `x1`.
Since the internal representation of numbers is in binary format, this
operation is equivalent to dividing `x1` by ``2**x2``.
Parameters
----------
x1 : array_like, int
Input values.
x2 : array_like, int
Number of bits to remove at the right of `x1`.
Returns
-------
out : ndarray, int
Return `x1` with bits shifted `x2` times to the right.
See Also
--------
left_shift : Shift the bits of an integer to the left.
binary_repr : Return the binary representation of the input number
as a string.
Examples
--------
>>> np.binary_repr(10)
'1010'
>>> np.right_shift(10, 1)
5
>>> np.binary_repr(5)
'101'
>>> np.right_shift(10, [1,2,3])
array([5, 2, 1])
""")
add_newdoc('numpy.core.umath', 'rint',
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : {ndarray, scalar}
Output array is same shape and type as `x`.
See Also
--------
ceil, floor, trunc
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 2., 2., 2.])
""")
add_newdoc('numpy.core.umath', 'sign',
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
Parameters
----------
x : array_like
Input values.
Returns
-------
y : ndarray
The sign of `x`.
Examples
--------
>>> np.sign([-5., 4.5])
array([-1., 1.])
>>> np.sign(0)
0
""")
add_newdoc('numpy.core.umath', 'signbit',
"""
Returns element-wise True where signbit is set (less than zero).
Parameters
----------
x : array_like
The input value(s).
out : ndarray, optional
Array into which the output is placed. Its type is preserved
and it must be of the right shape to hold the output.
See `doc.ufuncs`.
Returns
-------
result : ndarray of bool
Output array, or reference to `out` if that was supplied.
Examples
--------
>>> np.signbit(-1.2)
True
>>> np.signbit(np.array([1, -2.3, 2.1]))
array([False, True, False], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
If both arguments are arrays or sequences, they have to be of the same
length. If `x2` is a scalar, its sign will be copied to all elements of
`x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
The sign of `x2` is copied to `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See doc.ufuncs.
Returns
-------
out : array_like
The values of `x1` with the sign of `x2`.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> np.copysign([-1, 0, 1], -1.1)
array([-1., -0., -1.])
>>> np.copysign([-1, 0, 1], np.arange(3)-1)
array([-1., 0., 1.])
""")
add_newdoc('numpy.core.umath', 'nextafter',
"""
Return the next representable floating-point value after x1 in the direction
of x2 element-wise.
Parameters
----------
x1 : array_like
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and it
must be of the right shape to hold the output. See `doc.ufuncs`.
Returns
-------
out : array_like
The next representable values of `x1` in the direction of `x2`.
Examples
--------
>>> eps = np.finfo(np.float64).eps
>>> np.nextafter(1, 2) == eps + 1
True
>>> np.nextafter([1, 2], [2, 1]) == [eps + 1, 2 - eps]
array([ True, True], dtype=bool)
""")
add_newdoc('numpy.core.umath', 'spacing',
"""
Return the distance between x and the nearest adjacent number.
Parameters
----------
x1 : array_like
Values to find the spacing of.
Returns
-------
out : array_like
The spacing of values of `x1`.
Notes
-----
It can be considered as a generalization of EPS:
``spacing(np.float64(1)) == np.finfo(np.float64).eps``, and there
should not be any representable number between ``x + spacing(x)`` and
x for any finite x.
Spacing of +- inf and nan is nan.
Examples
--------
>>> np.spacing(1) == np.finfo(np.float64).eps
True
""")
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
Parameters
----------
x : array_like
Angle, in radians (:math:`2 \\pi` rad equals 360 degrees).
Returns
-------
y : array_like
The sine of each element of x.
See Also
--------
arcsin, sinh, cos
Notes
-----
The sine is one of the fundamental functions of trigonometry
(the mathematical study of triangles). Consider a circle of radius
1 centered on the origin. A ray comes in from the :math:`+x` axis,
makes an angle at the origin (measured counter-clockwise from that
axis), and departs from the origin. The :math:`y` coordinate of
the outgoing ray's intersection with the unit circle is the sine
of that angle. It ranges from -1 for :math:`x=3\\pi / 2` to
+1 for :math:`\\pi / 2.` The function has zeroes where the angle is
a multiple of :math:`\\pi`. Sines of angles between :math:`\\pi` and
:math:`2\\pi` are negative. The numerous properties of the sine and
related functions are included in any standard trigonometry text.
Examples
--------
Print sine of one angle:
>>> np.sin(np.pi/2.)
1.0
Print sines of an array of angles given in degrees:
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180. )
array([ 0. , 0.5 , 0.70710678, 0.8660254 , 1. ])
Plot the sine function:
>>> import matplotlib.pylab as plt
>>> x = np.linspace(-np.pi, np.pi, 201)
>>> plt.plot(x, np.sin(x))
>>> plt.xlabel('Angle [rad]')
>>> plt.ylabel('sin(x)')
>>> plt.axis('tight')
>>> plt.show()
""")
add_newdoc('numpy.core.umath', 'sinh',
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or
``-1j * np.sin(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic sine values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
Examples
--------
>>> np.sinh(0)
0.0
>>> np.sinh(np.pi*1j/2)
1j
>>> np.sinh(np.pi*1j) # (exact value is 0)
1.2246063538223773e-016j
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'sqrt',
"""
Return the positive square-root of an array, element-wise.
Parameters
----------
x : array_like
The values whose square-roots are required.
out : ndarray, optional
Alternate array object in which to put the result; if provided, it
must have the same shape as `x`
Returns
-------
y : ndarray
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. If any element in `x` is
complex, a complex array is returned (and the square-roots of
negative reals are calculated). If all of the elements in `x`
are real, so is `y`, with negative elements returning ``nan``.
If `out` was provided, `y` is a reference to it.
See Also
--------
lib.scimath.sqrt
A version which returns complex numbers when given negative reals.
Notes
-----
*sqrt* has--consistent with common convention--as its branch cut the
real "interval" [`-inf`, 0), and is continuous from above on it.
(A branch cut is a curve in the complex plane across which a given
complex function fails to be continuous.)
Examples
--------
>>> np.sqrt([1,4,9])
array([ 1., 2., 3.])
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
>>> np.sqrt([4, -1, np.inf])
array([ 2., NaN, Inf])
""")
add_newdoc('numpy.core.umath', 'square',
"""
Return the element-wise square of the input.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray
Element-wise `x*x`, of the same shape and dtype as `x`.
Returns scalar if `x` is a scalar.
See Also
--------
numpy.linalg.matrix_power
sqrt
power
Examples
--------
>>> np.square([-1j, 1])
array([-1.-0.j, 1.+0.j])
""")
add_newdoc('numpy.core.umath', 'subtract',
"""
Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
The arrays to be subtracted from each other.
Returns
-------
y : ndarray
The difference of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[ 0., 0., 0.],
[ 3., 3., 3.],
[ 6., 6., 6.]])
""")
add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tan([0.1], out1)
>>> out2 is out1
True
>>>
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)`` or
``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
out : ndarray, optional
Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
Raises
------
ValueError: invalid return array shape
if `out` is provided and `out.shape` != `x.shape` (See Examples)
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
References
----------
.. [1] M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
New York, NY: Dover, 1972, pg. 83.
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
http://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
>>> np.tanh((0, np.pi*1j, np.pi*1j/2))
array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
>>> # Example of ValueError due to provision of shape mis-matched `out`
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'true_divide',
"""
Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : array_like
Dividend array.
x2 : array_like
Divisor array.
Returns
-------
out : ndarray
Result is scalar if both inputs are scalar, ndarray otherwise.
Notes
-----
The floor division operator ``//`` was added in Python 2.2 making ``//``
and ``/`` equivalent operators. The default floor division operation of
``/`` can be replaced by true division with
``from __future__ import division``.
In Python 3.0, ``//`` is the floor division operator and ``/`` the
true division operator. The ``true_divide(x1, x2)`` function is
equivalent to true division in Python.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x/4
array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
>>> from __future__ import division
>>> x/4
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
>>> x//4
array([0, 0, 0, 0, 1])
""")
| gpl-3.0 |
dvoytenk/iceberg_tracking | interp_vel_scipy30m.py | 1 | 4796 | from numpy import *
from matplotlib.pyplot import *
import glob
import os
from scipy.stats import mode
from scipy import ndimage
import numpy
import matplotlib.pyplot as plt
from scipy.interpolate import Rbf
matplotlib.rcParams.update({'font.size': 18})
#berglocs=load('20120816_172400_20120816_182200.npy')
def plotarrows(filename):
clf()
berglocs=load(filename)
#pixel spacing in meters
pixelspacing=10.
#time difference between first and last measurement, minutes
timediff=30.
#find total displacement between first and last measurement
dsdt=berglocs[-1]-berglocs[0]
displacement=hypot(dsdt[:,0],dsdt[:,1])
disp_criterion=2
#minimum displacement (in pixels) required to count as motion, so stationary icebergs do not clog the data
good_indices=where(displacement>disp_criterion)[0]
berglocs_good=berglocs[:,good_indices,:]
#find the start and end points for each iceberg and plot them as straight lines
Y1=berglocs_good[0,:,0]
X1=berglocs_good[0,:,1]
Y2=berglocs_good[-1,:,0]
X2=berglocs_good[-1,:,1]
#get motion in x and y directions
DY=Y2-Y1
DX=X2-X1
#convert VX and VY to cm/s
VX=DX*100.*pixelspacing/(timediff*60.)
VY=DY*100.*pixelspacing/(timediff*60.)
nxcells=25
nycells=25
grid_x, grid_y = np.mgrid[xmin:xmax:complex(nxcells), ymin:ymax:complex(nycells)]
vx_rbf=Rbf(X1,Y1,VX,function='linear')
vxint=vx_rbf(grid_x.flatten(),grid_y.flatten())
vxint=vxint.reshape([nxcells,nycells])
vy_rbf=Rbf(X1,Y1,VY,function='linear')
vyint=vy_rbf(grid_x.flatten(),grid_y.flatten())
vyint=vyint.reshape([nxcells,nycells])
##show interpolated maps
#figure()
#subplot(1,2,1)
#title('vx')
#imshow(vxint,extent=[grid_x[:,0].min(),grid_x[:,0].max(),grid_y[0].min(),grid_y[0].max()])
#colorbar()
#subplot(1,2,2)
#title('vy')
#imshow(vyint,extent=[grid_x[:,0].min(),grid_x[:,0].max(),grid_y[0].min(),grid_y[0].max()])
#colorbar()
#show()
figname=os.path.basename(filename)[0:-4]
print 'velocity range '+figname
fullvel=sqrt(vyint**2+vxint**2)
print fullvel.min(),fullvel.max()
figure()
#make background black
ax=subplot('111', axisbg='black')
#title(figname) #creates full figure name, doesn't look good for publishing figures
#get figure title from filename, works only for this dataset
a=figname
figtitle='Aug. '+a[6:8]+' '+a[9:11]+':'+a[11:13]+' - '+'Aug. '+a[22:24]+' '+a[25:27]+':'+a[27:29]
title(figtitle)
#get velocity of 1 pixel in cm/s
pixelVel=100.*pixelspacing/(timediff*60.)
arrowSize=18.
arrowSpeed=arrowSize*pixelVel
arrowLabel='%d'%(arrowSpeed)+' cm/s'
#print arrowSize
q=quiver(grid_x,grid_y,vxint,vyint,scale_units='xy', angles='xy', scale=.25, color='w',zorder=2)
#p=quiverkey(q,0.0*grid_x.max(),1.51*grid_y.max(),arrowSpeed,arrowLabel,coordinates='data',color='r', labelpos='S',zorder=2)
#p=quiverkey(q,50,350,arrowSpeed,arrowLabel,coordinates='data',color='r', labelpos='S',zorder=4)
#imshow(bg,origin='upper',cmap='binary',zorder=1,interpolation='none')
#zorder controls plotting order, blot radar amplitude background
imshow((bgm),origin='upper',alpha=1,zorder=3,cmap='gray')
#trick the quiverkey labeling to allow masked window for the figures by drawing a nan-valued quiverplot
q1=quiver(grid_x,grid_y,nan*vxint,nan*vyint,scale_units='xy', angles='xy', scale=.25, color='w',zorder=4) #scale should be .4
p=quiverkey(q1,50,650,arrowSpeed,arrowLabel,coordinates='data',color='w', labelpos='S',labelcolor='w')
#to plot north arrow
ax.arrow(10,310,28,25,head_length=10,head_width=10, fc='w',ec='w',zorder=5)
text(52,356,'N',fontsize=16,color='w',zorder=6)
#change labels to show proper distances
xlabel('Distance (km)')
ylabel('Distance (km)')
ax.xaxis.set_ticks([0,250,500])
ax.set_xticklabels(['0','2.5','5'])
ax.yaxis.set_ticks([300,500,700])
ax.set_yticklabels(['0','2','4'])
axis([0,500,700,300])
#savefig('arrow/'+figname+'.png')
savefig('arrow/'+figname+'.png',dpi=300)
#savefig('figs/arrow'+figname+'.png')
return DX,DY,VX,VY,vxint,vyint
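# Minimal sketch of the Rbf-gridding step used in plotarrows() above, on
# made-up points. This helper is hypothetical, is never called by the
# script, and exists only to illustrate the interpolation idea.
def _rbf_grid_sketch(npts=20, ncells=25):
    xs = numpy.random.rand(npts) * 500.
    ys = 300. + numpy.random.rand(npts) * 400.
    vs = numpy.random.randn(npts)
    #fit a linear radial basis function to the scattered velocities
    rbf = Rbf(xs, ys, vs, function='linear')
    #evaluate it on a regular ncells x ncells grid, like vxint/vyint above
    gx, gy = numpy.mgrid[0:500:complex(ncells), 300:700:complex(ncells)]
    return rbf(gx.flatten(), gy.flatten()).reshape(ncells, ncells)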
#read in mask
bg=imread('2012maskT.png')
bg=bg*bg.astype('bool')
naz,nr=shape(bg)
yvl,xvl=where(bg==1.0) #find mask boundaries
ymin=yvl.min()
ymax=yvl.max()
xmin=xvl.min()
xmax=xvl.max()
#make mask to plot overlays
bgm=imread('bgm1.png')
#get rid of noise
bgm[where(bgm<.3)]=0.0
#mask out lagoon
bgm[where(bg==1)]=nan
#imshow(bgm)
#show()
#read all npy files hour by hour
directory='recscipy/'
filetype='*.npy'
files=glob.glob(os.path.join(directory+filetype))
files.sort()
#files=['data/20120816_192400_20120816_202200.npy']
#files=['data/20120817_202400_20120817_212200.npy']
#files[32] is comparison figure
for i in range(len(files)):
print i
dx,dy,vx,vy,vxint,vyint=plotarrows(files[i])
| bsd-2-clause |
artisan-roaster-scope/artisan | src/setup-win.py | 2 | 7374 | """
This is a set up script for py2exe
USAGE: python setup-win py2exe
"""
from distutils.core import setup
import matplotlib as mpl
import py2exe
import numpy
import os
import sys
# add any numpy directory containing a dll file to sys.path
def numpy_dll_paths_fix():
paths = set()
np_path = numpy.__path__[0]
for dirpath, _, filenames in os.walk(np_path):
for item in filenames:
if item.endswith('.dll'):
paths.add(dirpath)
if paths:
sys.path.append(*list(paths))
numpy_dll_paths_fix()
# Remove the build folder, a bit slower but ensures that build contains the latest
import shutil
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("dist", ignore_errors=True)
INCLUDES = [
"sip",
"serial",
"scipy.special._ufuncs_cxx",
"scipy.sparse.csgraph._validation",
"scipy.integrate",
"scipy.interpolate",
]
EXCLUDES = ['gevent._socket3',
'_tkagg',
'_ps',
'_fltkagg',
'Tkinter',
'Tkconstants',
'_cairo',
'_gtk',
'gtkcairo',
'pydoc',
'doctest',
'pdb',
'pyreadline',
'optparse',
'sqlite3',
'bsddb',
'curses',
'tcl',
'_wxagg',
'_gtagg',
'_cocoaagg',
'_wx']
# current version of Artisan
import artisanlib
VERSION = artisanlib.__version__
LICENSE = 'GNU General Public License (GPL)'
cwd = os.getcwd()
DATAFILES = mpl.get_py2exe_datafiles()
DATAFILES = DATAFILES + \
[(r'plugins\imageformats', [
    r'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qsvg4.dll',
    r'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qgif4.dll',
    r'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qtiff4.dll',
    r'c:\Python27\Lib\site-packages\PyQt4\plugins\imageformats\qjpeg4.dll',
    ]),
 (r'plugins\iconengines', [
    r'c:\Python27\Lib\site-packages\PyQt4\plugins\iconengines\qsvgicon4.dll',
    ]),
]
setup(
name ="Artisan",
version=VERSION,
author='YOUcouldbeTOO',
author_email='[email protected]',
license=LICENSE,
windows=[{"script" : cwd + "\\artisan.py",
"icon_resources": [(0, cwd + "\\artisan.ico")]
}],
data_files = DATAFILES,
zipfile = "lib\library.zip",
options={"py2exe" :{
"packages": ['matplotlib','pytz'],
"compressed": False, # faster
"unbuffered": True,
'optimize': 2,
"bundle_files": 2, # default bundle_files: 3 breaks WebLCDs on Windows
"dll_excludes":[
'MSVCP90.dll','tcl84.dll','tk84.dll','libgdk-win32-2.0-0.dll',
'libgdk_pixbuf-2.0-0.dll','libgobject-2.0-0.dll',
'MSVCR90.dll','MSVCN90.dll','mwsock.dll','powrprof.dll'],
"includes" : INCLUDES,
"excludes" : EXCLUDES}
}
)
os.system(r'copy README.txt dist')
os.system(r'copy LICENSE.txt dist')
os.system(r'copy ..\\LICENSE dist\\LICENSE.txt')
os.system(r'copy qt-win.conf dist\\qt.conf')
os.system(r'mkdir dist\\Wheels')
os.system(r'mkdir dist\\Wheels\\Cupping')
os.system(r'mkdir dist\\Wheels\\Other')
os.system(r'mkdir dist\\Wheels\\Roasting')
os.system(r'copy Wheels\\Cupping\\* dist\\Wheels\\Cupping')
os.system(r'copy Wheels\\Other\\* dist\\Wheels\\Other')
os.system(r'copy Wheels\\Roasting\\* dist\\Wheels\\Roasting')
os.system(r'mkdir dist\\translations')
os.system(r'copy translations\\*.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_ar.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_de.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_es.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_fr.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_he.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_hu.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_ja.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_ko.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_ru.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_pl.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_pt.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_ru.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_sv.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_zh_CN.qm dist\\translations')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\PyQt4\\translations\\qt_zh_TW.qm dist\\translations')
os.system(r'rmdir /q /s dist\\mpl-data\\sample_data')
# YOCTO HACK BEGIN: manually copy over the dlls
os.system(r'mkdir dist\\lib')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\yoctopuce\\cdll\\yapi.dll dist\\lib')
os.system(r'copy c:\\Python27\\Lib\\site-packages\\yoctopuce\\cdll\\yapi64.dll dist\\lib')
# YOCTO HACK END
os.system(r'copy artisan.png dist')
os.system(r'copy artisanAlarms.ico dist')
os.system(r'copy artisanProfile.ico dist')
os.system(r'copy artisanPalettes.ico dist')
os.system(r'copy artisanSettings.ico dist')
os.system(r'copy artisanTheme.ico dist')
os.system(r'copy artisanWheel.ico dist')
os.system(r'copy includes\\Humor-Sans.ttf dist')
os.system(r'copy includes\\WenQuanYiZenHei-01.ttf dist')
os.system(r'copy includes\\SourceHanSansCN-Regular.otf dist')
os.system(r'copy includes\\SourceHanSansHK-Regular.otf dist')
os.system(r'copy includes\\SourceHanSansJP-Regular.otf dist')
os.system(r'copy includes\\SourceHanSansKR-Regular.otf dist')
os.system(r'copy includes\\SourceHanSansTW-Regular.otf dist')
os.system(r'copy includes\\alarmclock.eot dist')
os.system(r'copy includes\\alarmclock.svg dist')
os.system(r'copy includes\\alarmclock.ttf dist')
os.system(r'copy includes\\alarmclock.woff dist')
os.system(r'copy includes\\artisan.tpl dist')
os.system(r'copy includes\\bigtext.js dist')
os.system(r'copy includes\\sorttable.js dist')
os.system(r'copy includes\\report-template.htm dist')
os.system(r'copy includes\\roast-template.htm dist')
os.system(r'copy includes\\ranking-template.htm dist')
os.system(r'copy includes\\jquery-1.11.1.min.js dist')
os.system(r'mkdir dist\\Machines')
os.system(r'xcopy includes\\Machines dist\\Machines /y /S')
os.system(r'mkdir dist\\Themes')
os.system(r'xcopy includes\\Themes dist\\Themes /y /S')
os.system(r'copy ..\\vcredist_x86.exe dist')
| gpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/api/sankey_demo_basics.py | 12 | 3421 | """Demonstrate the Sankey class by producing three basic diagrams.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
# Example 1 -- Mostly defaults
# This demonstrates how to create a simple diagram by implicitly calling the
# Sankey.add() method and by appending finish() to the call to the class.
Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
plt.title("The default settings produce a diagram like this.")
# Notice:
# 1. Axes weren't provided when Sankey() was instantiated, so they were
# created automatically.
# 2. The scale argument wasn't necessary since the data was already
# normalized.
# 3. By default, the lengths of the paths are justified.
# Example 2
# This demonstrates:
# 1. Setting one path longer than the others
# 2. Placing a label in the middle of the diagram
# 3. Using the scale argument to normalize the flows
# 4. Implicitly passing keyword arguments to PathPatch()
# 5. Changing the angle of the arrow heads
# 6. Changing the offset between the tips of the paths and their labels
# 7. Formatting the numbers in the path labels and the associated unit
# 8. Changing the appearance of the patch and the labels after the figure is
# created
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
title="Flow Diagram of a Widget")
sankey = Sankey(ax=ax, scale=0.01, offset=0.2, head_angle=180,
format='%.0f', unit='%')
sankey.add(flows=[25, 0, 60, -10, -20, -5, -15, -10, -40],
labels = ['', '', '', 'First', 'Second', 'Third', 'Fourth',
'Fifth', 'Hurray!'],
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0],
pathlengths = [0.25, 0.25, 0.25, 0.25, 0.25, 0.6, 0.25, 0.25,
0.25],
patchlabel="Widget\nA",
alpha=0.2, lw=2.0) # Arguments to matplotlib.patches.PathPatch()
diagrams = sankey.finish()
diagrams[0].patch.set_facecolor('#37c959')
diagrams[0].texts[-1].set_color('r')
diagrams[0].text.set_fontweight('bold')
# Notice:
# 1. Since the sum of the flows is nonzero, the width of the trunk isn't
# uniform. If verbose.level is helpful (in matplotlibrc), a message is
# given in the terminal window.
# 2. The second flow doesn't appear because its value is zero. Again, if
# verbose.level is helpful, a message is given in the terminal window.
# Example 3
# This demonstrates:
# 1. Connecting two systems
# 2. Turning off the labels of the quantities
# 3. Adding a legend
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Two Systems")
flows = [0.25, 0.15, 0.60, -0.10, -0.05, -0.25, -0.15, -0.10, -0.35]
sankey = Sankey(ax=ax, unit=None)
sankey.add(flows=flows, label='one',
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0])
sankey.add(flows=[-0.25, 0.15, 0.1], fc='#37c959', label='two',
orientations=[-1, -1, -1], prior=0, connect=(0, 0))
diagrams = sankey.finish()
diagrams[-1].patch.set_hatch('/')
plt.legend(loc='best')
# Notice that only one connection is specified, but the systems form a
# circuit since: (1) the lengths of the paths are justified and (2) the
# orientation and ordering of the flows is mirrored.
plt.show()
| gpl-2.0 |
andaag/scikit-learn | sklearn/datasets/samples_generator.py | 35 | 56035 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
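# Minimal usage sketch (hypothetical helper, never called in this module):
# shows the column layout described in the docstring above. With the
# arguments below, the 10 columns are 3 informative + 2 redundant +
# 1 repeated + 4 useless noise features.
def _sketch_make_classification():
    X, y = make_classification(n_samples=200, n_features=10, n_informative=3,
                               n_redundant=2, n_repeated=1, n_classes=2,
                               random_state=0)
    return X.shape, np.bincount(y)  # (200, 10) and roughly balanced counts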
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=True,
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=True)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
Y = MultiLabelBinarizer().fit([range(n_classes)]).transform(Y)
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
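# Minimal usage sketch (hypothetical helper, never called in this module):
# with the default ``return_indicator=True`` the labels come back as a
# binary indicator matrix, one row per sample and one column per class.
def _sketch_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=20,
                                          n_classes=5, n_labels=2,
                                          random_state=0)
    # Y.sum(axis=1) counts labels per sample; its mean is close to n_labels
    return X.shape, Y.shape, Y.sum(axis=1).mean()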
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
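# Minimal usage sketch (hypothetical helper, never called in this module):
# the target is defined purely by the threshold on the squared norm quoted
# in the docstring, so it can be recomputed from X.
def _sketch_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    recomputed = np.where((X ** 2.0).sum(axis=1) > 9.34, 1.0, -1.0)
    return np.array_equal(y, recomputed)  # expected to be True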
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
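# Minimal usage sketch (hypothetical helper, never called in this module):
# with ``coef=True`` and no noise, the targets are exactly the linear model
# applied to X, so they can be reconstructed from the returned coefficients.
def _sketch_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=8, n_informative=3,
                              noise=0.0, coef=True, random_state=0)
    return np.allclose(y, np.dot(X, w))  # expected to be True (bias is 0.0)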
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
               np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
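# Minimal usage sketch (hypothetical helper, never called in this module):
# both 2-D toy generators above return planar coordinates plus binary
# class labels, which is why they are convenient for visualizing classifiers.
def _sketch_toy_2d_datasets():
    X_c, y_c = make_circles(n_samples=100, factor=0.5, noise=0.05,
                            random_state=0)
    X_m, y_m = make_moons(n_samples=100, noise=0.05, random_state=0)
    return X_c.shape, set(y_c), X_m.shape, set(y_m)  # (100, 2) and {0, 1}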
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
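# Minimal usage sketch (hypothetical helper, never called in this module):
# with noise=0 the output follows the Friedman #1 formula exactly and the
# columns beyond the first five do not influence y.
def _sketch_make_friedman1():
    X, y = make_friedman1(n_samples=50, n_features=10, noise=0.0,
                          random_state=0)
    expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
                + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    return np.allclose(y, expected)  # expected to be True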
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
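# Minimal usage sketch (hypothetical helper, never called in this module):
# most of the spectrum of the generated matrix is carried by roughly
# ``effective_rank`` singular values, as the profile above describes.
def _sketch_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=100,
                             effective_rank=10, tail_strength=0.5,
                             random_state=0)
    s = linalg.svd(X, compute_uv=False)
    return X.shape, s[:10].sum() / s.sum()  # leading share of the spectrum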
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
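# Minimal usage sketch (hypothetical helper, never called in this module):
# the returned triple satisfies Y = np.dot(D, X) and every column of X has
# exactly ``n_nonzero_coefs`` active entries.
def _sketch_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    return np.allclose(Y, np.dot(D, X)), (X != 0).sum(axis=0)  # True, all 3s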
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
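# Minimal usage sketch (hypothetical helper, never called in this module):
# the generated precision matrix is symmetric and positive definite; the
# sparsity of its Cholesky factor is governed by ``alpha`` as noted above.
def _sketch_make_sparse_spd_matrix():
    prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
    symmetric = np.allclose(prec, prec.T)
    positive_definite = bool(np.all(linalg.eigvalsh(prec) > 0))
    return symmetric, positive_definite  # expected to be (True, True)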
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
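# A minimal usage sketch (illustrative only): X holds the 3-D points of the
# roll and t the 1-D manifold coordinate used to colour or unroll them.
def _example_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=200, noise=0.05, random_state=0)
    assert X.shape == (200, 3)
    assert t.shape == (200,)
    return X, t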
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
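# A minimal usage sketch (illustrative only): with n_samples divisible by
# n_classes the quantile labelling yields exactly equal class counts.
def _example_make_gaussian_quantiles():
    import numpy as np
    X, y = make_gaussian_quantiles(n_samples=300, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (300, 2) and y.shape == (300,)
    assert np.all(np.bincount(y) == 100)
    return X, y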
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
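# A minimal usage sketch (illustrative only): rows and cols are boolean
# indicator arrays with one row per bicluster.
def _example_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       noise=0.0, shuffle=False,
                                       random_state=0)
    assert data.shape == (30, 20)
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    return data, rows, cols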
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
huaxz1986/git_book | chapters/Cluster_EM/agglomerative_clustering.py | 1 | 2546 | # -*- coding: utf-8 -*-
"""
    Clustering and the EM algorithm
~~~~~~~~~~~~~~~~
AgglomerativeClustering
:copyright: (c) 2016 by the huaxz1986.
:license: lgpl-3.0, see LICENSE for more details.
"""
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
def test_AgglomerativeClustering(*data):
'''
    Test the basic usage of AgglomerativeClustering
    :param data: variable-length arguments; a tuple whose first element is the sample set and whose second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
clst=cluster.AgglomerativeClustering()
predicted_labels=clst.fit_predict(X)
print("ARI:%s"% adjusted_rand_score(labels_true,predicted_labels))
def test_AgglomerativeClustering_nclusters(*data):
'''
    Test how the clustering result of AgglomerativeClustering varies with the n_clusters parameter
    :param data: variable-length arguments; a tuple whose first element is the sample set and whose second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
nums=range(1,50)
ARIs=[]
for num in nums:
clst=cluster.AgglomerativeClustering(n_clusters=num)
predicted_labels=clst.fit_predict(X)
ARIs.append(adjusted_rand_score(labels_true,predicted_labels))
    ## plot
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot(nums,ARIs,marker="+")
ax.set_xlabel("n_clusters")
ax.set_ylabel("ARI")
fig.suptitle("AgglomerativeClustering")
plt.show()
def test_AgglomerativeClustering_linkage(*data):
'''
    Test how the clustering result of AgglomerativeClustering varies with the linkage method
    :param data: variable-length arguments; a tuple whose first element is the sample set and whose second element is the true cluster labels of the samples
:return: None
'''
X,labels_true=data
nums=range(1,50)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
linkages=['ward','complete','average']
markers="+o*"
for i, linkage in enumerate(linkages):
ARIs=[]
for num in nums:
clst=cluster.AgglomerativeClustering(n_clusters=num,linkage=linkage)
predicted_labels=clst.fit_predict(X)
ARIs.append(adjusted_rand_score(labels_true,predicted_labels))
ax.plot(nums,ARIs,marker=markers[i],label="linkage:%s"%linkage)
ax.set_xlabel("n_clusters")
ax.set_ylabel("ARI")
ax.legend(loc="best")
fig.suptitle("AgglomerativeClustering")
plt.show() | gpl-3.0 |
ThomasMiconi/nupic.research | projects/nlp/run_tm_learning.py | 11 | 5601 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Script to run temporal memory on NLP documents
"""
import argparse
from textwrap import TextWrapper
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import nupic
from nupic.data.file_record_stream import FileRecordStream
from htmresearch.frameworks.nlp.classification_model import ClassificationModel
from htmresearch.frameworks.nlp.model_factory import (
createModel, getNetworkConfig)
from htmresearch.support.csv_helper import readDataAndReshuffle
plt.ion()
wrapper = TextWrapper(width=100)
def getTMRegion(network):
tmRegion = None
for region in network.regions.values():
regionInstance = region
if type(regionInstance.getSelf()) is nupic.regions.TPRegion.TPRegion:
tmRegion = regionInstance.getSelf()
return tmRegion
def instantiateModel(args):
"""
Return an instance of the model we will use.
"""
# Some values of K we know work well for this problem for specific model types
kValues = {"keywords": 21, "docfp": 3}
# Create model after setting specific arguments required for this experiment
args.networkConfig = getNetworkConfig(args.networkConfigPath)
args.k = kValues.get(args.modelName, 1)
args.numLabels = 2
model = createModel(**vars(args))
return model
def trainModel(args, model, trainingData, labelRefs):
"""
Train the given model on trainingData. Return the trained model instance.
"""
tmRegion = getTMRegion(model.network)
print
print "=======================Training model on sample text================"
for recordNum, doc in enumerate(trainingData):
document = doc[0]
labels = doc[1]
docId = doc[2]
if args.verbosity > 0:
print
print "Document=", wrapper.fill(document)
print "label=", labelRefs[labels[0]], "id=", docId
model.trainDocument(document, labels, docId)
numActiveCols = tmRegion._tfdr.mmGetTraceActiveColumns().makeCountsTrace().data
numPredictedActiveCells = \
tmRegion._tfdr.mmGetTracePredictedActiveCells().makeCountsTrace().data
if args.verbosity > 0:
print "Word # %s, Avg Active Cols # %s, Avg predicted-active cell # %s " % (
len(numActiveCols),
np.mean(np.array(numActiveCols)),
np.mean(np.array(numPredictedActiveCells))
)
tmRegion._tfdr.mmClearHistory()
return model
def runExperiment(args):
"""
Create model according to args, train on training data, save model,
restore model, test on test data.
"""
args.numLabels = 2
(trainingData, labelRefs, documentCategoryMap,
documentTextMap) = readDataAndReshuffle(args)
# Create model
model = instantiateModel(args)
model = trainModel(args, model, trainingData, labelRefs)
# TODO: Visualize prediction quality
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-c", "--networkConfigPath",
default="data/network_configs/tm_knn_4k_retina.json",
help="Path to JSON specifying the network params.",
type=str)
parser.add_argument("-m", "--modelName",
default="htm",
type=str,
help="Name of model class. Options: [keywords,htm,docfp]")
parser.add_argument("--retinaScaling",
default=1.0,
type=float,
help="Factor by which to scale the Cortical.io retina.")
parser.add_argument("--retina",
default="en_associative_64_univ",
type=str,
help="Name of Cortical.io retina.")
parser.add_argument("--apiKey",
default=None,
type=str,
help="Key for Cortical.io API. If not specified will "
"use the environment variable CORTICAL_API_KEY.")
parser.add_argument("--modelDir",
default="MODELNAME.checkpoint",
help="Model will be saved in this directory.")
parser.add_argument("-v", "--verbosity",
default=1,
type=int,
help="verbosity 0 will print out experiment steps, "
"verbosity 1 will include results, and verbosity > "
"1 will print out preprocessed tokens and kNN "
"inference metrics.")
args = parser.parse_args()
# By default set checkpoint directory name based on model name
if args.modelDir == "MODELNAME.checkpoint":
args.modelDir = args.modelName + ".checkpoint"
print "Save dir: ", args.modelDir
runExperiment(args)
| agpl-3.0 |
ursk/heise | heise.py | 1 | 13193 | """
Heisenberg: CC mapping with convolutive FFT
STA using SVD
"""
import numpy as np
#np.__config__.show() # make sure we are on mkl
import sys
import tables
import matplotlib.pyplot as plt
#plt.interactive(1)
#from ipdb import set_trace as trace
#import cProfile # profile the code with cProfile.run('self.learn(stimexp, spike)')
class heisenberg(object):
def __init__(self):
self.im=32
self.win=8
self.step=3.
self.frame=np.int(np.floor((self.im-self.win+1)/self.step))
self.step = np.int(self.step)
self.channels = 32
self.whiten = True
self.maxiter = 200 # for lbfgs
self.rdim = 1024 # dimensionality to retain from 32**2=1024 -- [TODO] Empty dimensions might be what leads to funky HF structure in the filters?
self.bin = 3. # time bins to reduce sampling rate from 150fps to 50 fps
self.dt = self.bin * .0066 # time step to get rate in spikes/second
self.Ttrain = 50000
self.lam = .01 # l2 regularizer -- .01 seems to work, does not improve things though
def load_data(self, session='tigerp6', movie='duck8'):
path_to_movies = "./movies.h5" # with data in format /movie [frames * x * y] 3d array
path_to_spikes = "./spikes.h5" # with data in format /movie/session [channels * frames] 2d array
h5 = tables.openFile(path_to_movies, mode = "r")
stim=h5.getNode('/'+movie)[:]
h5.close()
self.T=stim.shape[0]
stim = stim.astype('double')
stim = stim - stim.mean(0)[np.newaxis, :]
stim = stim / stim.std(0)[np.newaxis, :]
# compute 3x downsampled stimulus
if self.bin > 1:
Tsub = np.floor(self.T/self.bin)
stim = stim[0:self.bin*Tsub,:,:].transpose(1,2,0).reshape(self.im,self.im,Tsub, self.bin).sum(3).transpose(2,0,1)/3.
self.T=Tsub
if self.whiten:
cov = np.dot(stim.reshape(self.T,32**2).T, stim.reshape(self.T,32**2))/self.T
D, E = np.linalg.eigh(cov) # h for symmetric matrices
E = E[:, np.argsort(D)[::-1]]
D.sort()
D=D[::-1]
self.D=D
wM = np.dot(E[:,0:self.rdim], np.dot(np.diag((D[0:self.rdim]+.1)**-.5), E[:,0:self.rdim].T))
stim = np.dot(wM, stim.reshape(self.T,32**2).T).T.reshape(self.T,32,32)
# adding to the diagonal means the output will not be quite unit variance.
#stim = stim[:,1:31, 1:31] # crop the center
#self.im = 30
# compute global normalizers: (not compatible with rDIM)
fft = np.abs(np.fft.fft2(stim[:,self.win:2*self.win,self.win:2*self.win]))
self.f_mean = np.fft.fftshift(fft.mean(0))
self.f_std = np.fft.fftshift((fft-fft.mean(0)).std(0))
h5 = tables.openFile(path_to_spikes, mode = "r")
spikes=h5.getNode('/'+movie+'/'+session)[:]
h5.close()
if self.bin > 1:
spikes = spikes[:,0:Tsub*self.bin].reshape(32, Tsub, self.bin).sum(2)
return (stim, spikes) #
def sta(self, stimexp, spike, plot=False):
""" Fourier Expansion STA """
triggers=np.nonzero(spike)[0]
canvas4d = stimexp[triggers,:].mean(0).reshape(self.win,self.win,self.frame,self.frame)
self.siglev = np.sqrt(triggers.shape[0]) # noise level, assuming gaussian
canblock = canvas4d.reshape(self.win**2, self.frame**2)
U, s, V = np.linalg.svd(canblock) # U is 64 FFT components, V is 525 locations
n = 1 # still need 2 components, because the first is the funky whitening artifact.
canrec = np.dot(U[:,0:n],np.dot(np.diag(s[0:n]),V[0:n,:]))
canmat = canvas4d.transpose((0,2,1,3)).reshape(self.win*self.frame, self.win*self.frame) # square matrix
canmatrec = canrec.reshape(self.win, self.win, self.frame, self.frame).transpose((0,2,1,3)).reshape(self.win*self.frame, self.win*self.frame)
# flipped sign seems more natural.
self.sta_u = -U[:,0:1]
self.sta_s = s[0]
self.sta_v = -V[0:1,:]
#
#plt.figure(1), plt.clf()
#plt.subplot(2,1,1)
#plt.imshow(canblock, interpolation='nearest')
#plt.subplot(2,1,2)
#plt.imshow(canrec, interpolation='nearest')
#plt.colorbar()
if plot:
plt.figure(2), plt.clf()
ax=plt.subplot(2,2,1)
plt.imshow(canmat, interpolation='nearest'); plt.title('original'); plt.colorbar()
plt.xticks(np.arange(0,self.win*self.frame,self.frame)-.5, ''); plt.yticks(np.arange(0,self.win*self.frame,self.frame)-.5, '')
ax.grid(color='k', linestyle='-', linewidth=.5)
ax=plt.subplot(2,2,2)
plt.imshow(canmatrec, interpolation='nearest'); plt.title('SVD reconstruction'); plt.colorbar()
plt.xticks(np.arange(0,self.win*self.frame,self.frame)-.5, ''); plt.yticks(np.arange(0,self.win*self.frame,self.frame)-.5, '')
ax.grid(color='k', linestyle='-', linewidth=.5)
plt.subplot(2,2,3)
plt.imshow(self.sta_u.reshape(self.win,self.win), interpolation='nearest'); plt.title('f-filter'); plt.colorbar()
plt.subplot(2,2,4)
plt.imshow(self.sta_v.reshape(self.frame, self.frame), interpolation='nearest'); plt.title('x-filter'); plt.colorbar()
print "s is", self.sta_s
return canvas4d
def cost_fft(self, stim, spike):
""" Using online FFT to avoid storing the stimulus represetation"""
pass
def expand_stim(self, stim):
""" precompute all FFTs with stride 3 so it fits in memory """
# build the k_x k_f stimulus representation -- move outside, do once
T = stim.shape[0]
X = np.zeros((T, self.win**2, self.frame**2)) # work on 4d canvas as much as we can.
t=0
for x in range(self.frame):
sys.stdout.write(".")
for y in range(self.frame):
patch = np.abs(np.fft.fft2(stim[:, self.step*x:self.step*x+self.win, self.step*y:self.step*y+self.win]))
# normalizing here is actually fourier whitening. But it leaves spatial inhomogeneities so we pre-whiten and don't normalize here.
npatch = ( np.fft.fftshift(patch, axes=(1,2)) - 1*self.f_mean ) / self.f_std
X[:,:,t] = npatch.reshape(T,self.win**2)
t+=1
return X.reshape(T, self.win**2 * self.frame**2)
def expand_time(self, stim):
# the filters
f1 = np.array((0, 0, 1)) # 40ms delay, 20ms wide
f2 = np.array((0, 0, 0, 1, 1, 1))/3. # 60ms delay, 120ms wide
f3 = np.array((0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1))/6. # 120ms delay, 240ms wide
# np.convolve does not exist, use a loop
stimt=np.zeros(stim.shape+(3,))
stimt[:,:,0] = self.my_convolve(stim, f1)
stimt[:,:,1] = self.my_convolve(stim, f2)
stimt[:,:,2] = self.my_convolve(stim, f3)
return stimt
def my_convolve(self, X, f):
# filtering along first dimension.
# same shape, pad end with zeros.
Xout = np.zeros(X.shape)
# accumulate weighted sum of shifts.
for i in range(f.shape[0]):
sys.stdout.write("*")
Xout[0:self.T-f.shape[0], :] += f[i] * X[i:self.T-f.shape[0]+i,:]
return Xout
def cost_store(self, x, args, debug=False):
"""
x = u,v,b
args = spike, stimexp
the function we are minimizing can be written as
f = exp(k'x)
= exp(sum_i k_i'x)
          = exp(sum_i u_i v'x_i)
i.e. the large k is broken up into u and v, and i sums over frequency blocks.
Cost function
C = -( n log(f) - f dt )
Derivatives
d/dv log(f) = sum_i u_i x_i = Xu
d/du log(f) = v'X
With regularization (on the elements of u and v!), this becomes
C = -( n log(f) - f dt ) - u'u - v'v
"""
# unpack parameters
stimexp, spike = args
u = x[0:self.win**2]
v = x[self.win**2:-1]
b = x[-1]
k = np.outer(u,v).flatten() # full filter is 64x81
logf= np.dot(stimexp, k) + b # stimexp is 8*8x9*9, outer.flatten is (64, 81)
f=np.exp(logf)
#logfp=
fp= f[:, np.newaxis]
# cost: negative log-likelihood.
c = -(spike * logf - f * self.dt).mean(0) + self.lam*(u**2).sum(0) + self.lam*(v**2).sum(0)# maximize spike*logf overlap, minimize f
if debug:
print "cost", c
# gradients
T = stimexp.shape[0] # if we work on a subset
dlogfdu = np.dot(v, stimexp.reshape(T,self.win**2,self.frame**2).transpose((0,2,1))) # k-space
dlogfdv = np.dot(u, stimexp.reshape(T,self.win**2,self.frame**2)) # x-space, dot does last vs. second-to-last
dlogfdb = 1
dfdu = fp * dlogfdu # k-space, only true for exp nonlinearity
dfdv = fp * dlogfdv # x-space
dfdb = f
cu = (spike[:, np.newaxis] * dlogfdu).mean(0) - (self.dt * dfdu).mean(0) - self.lam*2*u # minus because it's flipped below.
cv = (spike[:, np.newaxis] * dlogfdv).mean(0) - (self.dt * dfdv).mean(0) - self.lam*2*v #
cb = (spike * dlogfdb).mean(0) - (self.dt * dfdb).mean(0)
g = -np.hstack((cu, cv, cb)) # 64+81+1 long
return c,g
def learnpixel(self, stim, spike, fourier=False):
from scipy.optimize import fmin_l_bfgs_b as lbfgs
self.im=24 # reduce the stimulus size a little bit.
if fourier:
fft = np.abs(np.fft.fft2(stim[:,4:28,4:28]))
f_mean = np.fft.fftshift(fft.mean(0))
f_std = np.fft.fftshift((fft-fft.mean(0)).std(0))
stim = (fft-f_mean) / f_std
stim = stim.reshape(self.T, self.im**2)[:,0:self.im*(self.im/2+1)] # cut off redundant frequencies
else:
stim = stim[:,4:28,4:28].reshape(self.T, self.im**2) # subset and flatten
x0 = 0.001 * np.random.randn(stim.shape[1]+1)
args = (stim[0:self.Ttrain, :], spike[0:self.Ttrain])
out = lbfgs(self.cost_pixel, x0, fprime=None, args=[args], iprint=-1, maxiter=self.maxiter, disp=1)
x = out[0]
k = x[0:-1]
b = x[-1]
prediction = np.exp(np.dot(stim, k) + b)
pixel_rsq = np.corrcoef(prediction[self.Ttrain:self.T], spike[self.Ttrain:self.T])[0,1]
return pixel_rsq
def cost_pixel(self, x, args, debug=False):
"""
x = k,b
args = spike, stimexp
"""
# unpack parameters
stim, spike = args
k = x[0:-1]
b = x[-1]
#trace()
logf= np.dot(stim, k) + b #
f=np.exp(logf)
fp= f[:, np.newaxis]
# cost: negative log-likelihood.
c = -(spike * logf - f * self.dt).mean(0) + self.lam*(k**2).sum(0) # maximize spike*logf overlap, minimize f
if debug:
print "cost", c
# gradients
dlogfdk = stim # k-space
dlogfdb = 1
dfdk = fp * dlogfdk # k-space, only true for exp nonlinearity
dfdb = f
ck = (spike[:, np.newaxis] * dlogfdk).mean(0) - (self.dt * dfdk).mean(0) - self.lam*2*k # minus because it's flipped below.
cb = (spike * dlogfdb).mean(0) - (self.dt * dfdb).mean(0)
g = -np.hstack((ck, cb)) # 64+81+1 long
return c,g
def learn(self, stimexp, spike):
""" do the learning """
from scipy.optimize import fmin_l_bfgs_b as lbfgs
# initialize at STA
if 1:
u=-self.sta_u # U is 64 FFT components -- the SVD vectors are unit L2 norm!
v=-self.sta_v # V is 525 locations -- sign flip weirdness in the gradient function?
b=1*np.ones(1) # bias
x0=np.vstack((u,v.T,b)).flatten() / np.sqrt(self.sta_s) # package parameters
else:
x0 = 0.001 * np.random.randn(self.win**2+self.frame**2+1)
#stimexp = self.expand_stim(stim) # build fourier representation, 3GB
args = (stimexp[0:self.Ttrain,:], spike[0:self.Ttrain]) # leave a validation set
# numerical sanity check
if 1:
epsi = 1e-6
eps1 =np.zeros(self.win**2 +self.frame**2 +1); eps1[1]=epsi
eps100=np.zeros(self.win**2 +self.frame**2 +1); eps100[self.win**2+1]=epsi
eps145=np.zeros(self.win**2 +self.frame**2 +1); eps145[-1]=epsi
cost, gradient = self.cost_store(x0, args)
cost1, gradient = self.cost_store(x0+eps1, args)
cost100, gradient = self.cost_store(x0+eps100, args)
cost145, gradient = self.cost_store(x0+eps145, args)
print "Numerical gradient checks:"
print gradient[1], (cost1-cost)/epsi # ok
print gradient[self.win**2+1], (cost100-cost)/epsi # ok
print gradient[-1], (cost145-cost)/epsi# ok
out = lbfgs(self.cost_store, x0, fprime=None, args=[args], iprint=-1, maxiter=self.maxiter, disp=1)
x = out[0]
glm_u = x[0:self.win**2]
glm_v = x[self.win**2:-1]
glm_b = x[-1]
return glm_u, glm_v, glm_b
def plotkernels(self, glm_u, glm_v, glm_b, stimexp, spike, plot=False):
k_f = glm_u.reshape(self.win, self.win)
k_x = glm_v.reshape(self.frame, self.frame)
k_matrix = np.outer(k_f, k_x) # outer flattens inputs
k_4d = k_matrix.reshape(self.win, self.win, self.frame, self.frame)
        k_canvas = k_4d.transpose(0,2,1,3).reshape(self.win*self.frame,self.win*self.frame) # frequency outside, location inside
prediction = np.exp(np.dot(stimexp, k_matrix.flatten()) + glm_b)
rsq = np.corrcoef(prediction[self.Ttrain:self.T], spike[self.Ttrain:self.T])[0,1]
if plot:
plt.figure(3)
plt.clf()
ax=plt.subplot(2,3,1);
plt.imshow(k_canvas, interpolation='nearest'); plt.title('GLM kernel'); plt.colorbar()
plt.xticks(np.arange(0,self.win*self.frame,self.frame)-.5, ''); plt.yticks(np.arange(0,self.win*self.frame,self.frame)-.5, '')
ax.grid(color='k', linestyle='-', linewidth=.5)
#plt.subplot(2,2,2); plt.imshow(k_matrix, interpolation='nearest'); plt.colorbar()
plt.subplot(2,3,2);
plt.imshow(k_f, interpolation='nearest'); plt.title('k-f'); plt.colorbar()
plt.subplot(2,3,3);
plt.imshow(k_x, interpolation='nearest'); plt.title('k-x'); plt.colorbar()
plt.subplot(2,1,2);
plt.plot(spike[0:1000]/self.dt); plt.plot(prediction[0:1000]);
print "---------------------------------"
print "r-squared on training set = %2.2f" %rsq
print "---------------------------------"
return rsq
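# A standalone sketch (illustrative only) of the shift-and-accumulate
# temporal filtering used in heisenberg.my_convolve above: for a 1-D signal
# it matches a 'valid' correlation with the filter, with the trailing
# samples left at zero.
def _example_shift_accumulate_filter():
    import numpy as np
    f = np.array((0, 0, 0, 1, 1, 1)) / 3.
    n_taps = f.shape[0]
    T = 20
    x = np.random.randn(T)
    out = np.zeros(T)
    for i in range(n_taps):
        out[0:T - n_taps] += f[i] * x[i:T - n_taps + i]
    # the populated part equals a 'valid' correlation of x with f
    assert np.allclose(out[:T - n_taps],
                       np.correlate(x, f, mode='valid')[:T - n_taps])
    return out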
def runall():
"""run all sessions in a loop"""
for session in ['tigerp6', 'beckp1', 'beckp4', 'orangep4', 'orangep5']:
pass
| gpl-2.0 |
benanne/kaggle-galaxies | extract_pysex_params_gen2.py | 8 | 3889 | import load_data
import pysex
import numpy as np
import multiprocessing as mp
import cPickle as pickle
"""
Extract a bunch of extra info to get a better idea of the size of objects
"""
SUBSETS = ['train', 'test']
TARGET_PATTERN = "data/pysex_params_gen2_%s.npy.gz"
SIGMA2 = 5000 # 5000 # std of the centrality weighting (Gaussian)
DETECT_THRESH = 2.0 # 10.0 # detection threshold for sextractor
NUM_PROCESSES = 8
def estimate_params(img):
img_green = img[..., 1] # supposedly using the green channel is a good idea. alternatively we could use luma.
# this seems to work well enough.
out = pysex.run(img_green, params=[
'X_IMAGE', 'Y_IMAGE', # barycenter
# 'XMIN_IMAGE', 'XMAX_IMAGE', 'YMIN_IMAGE', 'YMAX_IMAGE', # enclosing rectangle
# 'XPEAK_IMAGE', 'YPEAK_IMAGE', # location of maximal intensity
'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', # ellipse parameters
'PETRO_RADIUS',
# 'KRON_RADIUS', 'PETRO_RADIUS', 'FLUX_RADIUS', 'FWHM_IMAGE', # various radii
], conf_args={ 'DETECT_THRESH': DETECT_THRESH })
# x and y are flipped for some reason.
# theta should be 90 - theta.
# we convert these here so we can plot stuff with matplotlib easily.
try:
ys = out['X_IMAGE'].tonumpy()
xs = out['Y_IMAGE'].tonumpy()
as_ = out['A_IMAGE'].tonumpy()
bs = out['B_IMAGE'].tonumpy()
thetas = 90 - out['THETA_IMAGE'].tonumpy()
# kron_radii = out['KRON_RADIUS'].tonumpy()
petro_radii = out['PETRO_RADIUS'].tonumpy()
# flux_radii = out['FLUX_RADIUS'].tonumpy()
# fwhms = out['FWHM_IMAGE'].tonumpy()
# detect the most salient galaxy
# take in account size and centrality
surface_areas = np.pi * (as_ * bs)
centralities = np.exp(-((xs - 211.5)**2 + (ys - 211.5)**2)/SIGMA2) # 211.5, 211.5 is the center of the image
# salience is proportional to surface area, with a gaussian prior on the distance to the center.
saliences = surface_areas * centralities
most_salient_idx = np.argmax(saliences)
x = xs[most_salient_idx]
y = ys[most_salient_idx]
a = as_[most_salient_idx]
b = bs[most_salient_idx]
theta = thetas[most_salient_idx]
# kron_radius = kron_radii[most_salient_idx]
petro_radius = petro_radii[most_salient_idx]
# flux_radius = flux_radii[most_salient_idx]
# fwhm = fwhms[most_salient_idx]
except TypeError: # sometimes these are empty (no objects found), use defaults in that case
x = 211.5
y = 211.5
a = np.nan # dunno what this has to be, deal with it later
b = np.nan # same
theta = np.nan # same
# kron_radius = np.nan
petro_radius = np.nan
# flux_radius = np.nan
# fwhm = np.nan
# return (x, y, a, b, theta, flux_radius, kron_radius, petro_radius, fwhm)
return (x, y, a, b, theta, petro_radius)
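# A small numeric sketch (illustrative only, with made-up detections) of the
# salience rule used in estimate_params above: ellipse area weighted by a
# Gaussian prior on distance from the image centre (211.5, 211.5).
def _example_salience_weighting():
    xs = np.array([211.5, 100.0])   # hypothetical object centres
    ys = np.array([211.5, 100.0])
    as_ = np.array([10.0, 40.0])    # hypothetical ellipse semi-axes
    bs = np.array([8.0, 30.0])
    surface_areas = np.pi * (as_ * bs)
    centralities = np.exp(-((xs - 211.5)**2 + (ys - 211.5)**2) / SIGMA2)
    return np.argmax(surface_areas * centralities)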
for subset in SUBSETS:
print "SUBSET: %s" % subset
print
if subset == 'train':
num_images = load_data.num_train
ids = load_data.train_ids
elif subset == 'test':
num_images = load_data.num_test
ids = load_data.test_ids
def process(k):
print "image %d/%d (%s)" % (k + 1, num_images, subset)
img_id = ids[k]
img = load_data.load_image(img_id, from_ram=True, subset=subset)
return estimate_params(img)
pool = mp.Pool(NUM_PROCESSES)
estimated_params = pool.map(process, xrange(num_images), chunksize=100)
pool.close()
pool.join()
# estimated_params = map(process, xrange(num_images)) # no mp for debugging
params_array = np.array(estimated_params)
target_path = TARGET_PATTERN % subset
print "Saving to %s..." % target_path
load_data.save_gz(target_path, params_array)
| bsd-3-clause |
altairpearl/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 73 | 2264 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import matplotlib.pyplot as plt
#plt.matshow(cm, cmap=plt.cm.jet)
#plt.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
mobarski/sandbox | rsm/v9le/v4.py | 2 | 5658 | from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise --
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# - sklearn -- compatible api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# IDEA:
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# NEXT VERSION:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
def __init__(self,n,m):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.N = n
self.M = m
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
# ---[ core ]---------------------------------------------------------------
# TODO -- input length vs mem length
def scores(self, input, boost=False, noise=False, fatigue=0, dropout=0.0): # -> dict[i] -> scores
"""
input -- sparse binary features
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
dropout -- temporal disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.N
M = self.M
t = self.t
scores = {}
for j in mem:
scores[j] = len(input & mem[j])
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
for j in combinations(N,k):
scores[j] = -1
return scores
def learn(self, input, k, decay=0.0, dropout=0.0, fatigue=0,
negative=False, boost=True, noise=True):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
M = self.M
t = self.t
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
scores = self.scores(input, boost=boost, noise=noise, dropout=dropout, fatigue=fatigue)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
def fit(self, X, Y):
for x,y in zip (X,Y):
negative = not y
self.learn(x,negative=negative)
def score_many(self, X, k=1, method=1):
out = []
for x in X:
s = self.score_one(x,k,method)
out += [s]
return out
def transform(self, X, k=1, method=1, cutoff=0.5):
out = []
for s in self.score_many(X,k,method):
y = 1 if s>=cutoff else 0
out += [y]
return out
def confusion(self, X, Y, k=1, method=1, cutoff=0.5):
PY = self.transform(X,k,method,cutoff)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score(self, X, Y, k=1, method=1, cutoff=0.5, kind='acc'):
c = self.confusion(X,Y,k,method,cutoff)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
if kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='acc':
return (tp+tn) / (p+n)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
def score_one(self, input, k=1, method=1):
"aggregate scores to scalar"
scores = self.scores(input)
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(self.M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*self.M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(self.M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/self.M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(self.M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/self.M
return min(1.0,score)
def stats(self,prefix=''):
        mem_v = self.mem.values()
out = {}
out['m_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/self.N
out['m_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/self.N
out['m_full'] = sum([1.0 if len(x)==self.M else 0.0 for x in mem_v])/self.N
out['m_avg'] = sum([1.0*len(x) for x in mem_v])/(self.N*self.M)
return {k:v for k,v in out.items() if k.startswith(prefix)}
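# A minimal usage sketch (illustrative only): drives the rsm class with random
# sparse binary patterns; assumes the common2 helpers (pick, top) imported at
# the top of this file behave as they are used inside the class.
def _example_rsm_usage():
    net = rsm(n=50, m=8)
    patterns = [set(pick(set(range(100)), 10)) for _ in range(20)]
    for x in patterns:
        net.learn(x, k=2, boost=True, noise=True)
    return top(3, net.scores(patterns[0]))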
| mit |
soylentdeen/BlurryApple | GUI/Demo/Graffiti.py | 1 | 16052 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is the main file for launching the PyQt app.
Requirements:
You need Python 2 (+ matplotlib, pylab and pyfits modules), Qt4 and PyQt for this to work
Description:
- Graffiti.py is this Python file, must be executable.
Launch the GUI using the command (in terminal):
> python -i Graffiti.py
- demo.ui is an XML file, editable in a user-friendly manner with Qt Designer or Qt Creator (comes with the Qt installation).
- demo_ui.py is the PyQt Python file, generated with the terminal command :
> pyuic4 demo.ui -o demo_ui.py (pyuic4 comes with PyQt package)
This command has to be typed each time the GUI is modified in order to take into account the changes in PyQt (this program).
Note: Once generated DO NOT EDIT the demo_ui.py file. Only PyQtDemo.py (this file) can be edited safely.
"""
import sys# This module provides access to some variables used or maintained by the interpreter and to functions that interact strongly with the interpreter. It is always available.
sys.path.insert(0, './lib') #Add in this python session lib path
import os # enable shell commands in python. See also http://www.pythonforbeginners.com/os/pythons-os-module
#try:
import pyfits # See also http://www.stsci.edu/institute/software_hardware/pyfits
#except:
# from astropy.io import fits as pyfits #https://astropy.readthedocs.org/en/v0.3/io/fits/index.html
from matplotlib.pylab import * #Useful library for plotting stuff (pylab)
from matplotlib.mlab import * #Useful library for plotting stuff
from libfits import * #useful stuff for reading FITS files
# Qt4 libraries
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# From the "automatically generated" demo_ui.py file (see above) it imports the "Ui_demo_qt" class that defines everything that was created in the Qt GUI file (demo.ui). Do NOT change anything in the demo_ui file. If you want to change something in the GUI, use Qt designer to edit the demo.ui file and re-run the command: "pyuic4 demo.ui -o demo_ui.py" again.
# Note: The "Ui_demo_qt" name was choosen from the name the user put in the QMainWindow (see demo.ui) in which the pyuic4 command added Ui_ suffix.
from demo_ui import Ui_demo_qt #Required
import pdb # Enter in the debug mode by placing : pdb.set_trace() in your program. Note ipython enables it automatically by entering %pdb
# See also debug mode explanation here:
# http://www.fevrierdorian.com/blog/post/2009/11/04/Un-debugger-dans-Python-pour-voir-pr%C3%A9cis%C3%A9ment-ce-qui-ce-passe-dans-son-code
import VLTTools # Object created for talking to the VLT Tools - Added by C.Deen on 15 Dec 2014
"""
__ __ _ ____ _ _ ___ _
| \/ | __ _(_)_ __ / ___| | | |_ _| ___| | __ _ ___ ___
| |\/| |/ _` | | '_ \ | | _| | | || | / __| |/ _` / __/ __|
| | | | (_| | | | | | | |_| | |_| || | | (__| | (_| \__ \__ \
|_| |_|\__,_|_|_| |_| \____|\___/|___| \___|_|\__,_|___/___/
"""
class demo_ui_class( QtGui.QMainWindow ):
#=========================================================================================
# Here we define the class of the main Ui we will manipulate in python.
#=========================================================================================
def __init__( self, parent=None ): # Called constructeur in french. This is what happens when an object of this class is called. It's a kind of init.
QtGui.QWidget.__init__( self, parent )
self.ui = Ui_demo_qt() #ui. is the GUI
self.ui.setupUi( self ) # Just do it
#Initialize attributes of the GUI
self.ui.nbPushed = 0 # nb of times the push button was pushed
self.ui.twoStateButtonStatus = "released" #Current state of 2 state button
self.ui.nbItemsComboBox = 0 # Current number of items in the combo box
# We group the 4 radio buttons so that when one is checked all the others are unchecked automaticallly
#self.ui.group = QButtonGroup()
#self.ui.group.addButton(self.ui.radioButton_1)
#self.ui.group.addButton(self.ui.radioButton_2)
#self.ui.group.addButton(self.ui.radioButton_3)
#self.ui.group.addButton(self.ui.radioButton_4)
#We connect objects with the proper signal to interact with them...
QtCore.QObject.connect( self.ui.loadfitsButton, QtCore.SIGNAL("clicked()"), self.selectFITS ) #Connects "loadfitsButton" button to the "selectFITS" method
self.ui.twoStateButton.setCheckable(True)
self.ui.twoStateButton.clicked[bool].connect(self.twoStateButtonIsPushed) # We define the 2 state buttons here...
QtCore.QObject.connect( self.ui.pushButton, QtCore.SIGNAL("clicked()"), self.theButtonIsPushed ) #Connects "pushButton" to the "theButtonIsPushed" method
QtCore.QObject.connect( self.ui.plotRandom, QtCore.SIGNAL("clicked()"), self.theButtonPlotRandomIsPushed ) #Connects "plotRandom" to the "theButtonPlotRandomIsPushed" method
QtCore.QObject.connect( self.ui.okButton, QtCore.SIGNAL("clicked()"), self.theButtonOKIsClicked ) #Connects "OK" button to the "theButtonOKIsClicked" method
QtCore.QObject.connect( self.ui.resetCombobox, QtCore.SIGNAL("clicked()"), self.resetComboboxClicked ) #Connects "OK" button to the "theButtonOKIsClicked" method
#We connect here all the radiobutton to the "radioButtonWasClicked" method
QtCore.QObject.connect( self.ui.radioButton_1, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_2, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_3, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
QtCore.QObject.connect( self.ui.radioButton_4, QtCore.SIGNAL("clicked()"), self.radioButtonWasClicked )
#Connects the signal when the combobox is changed
QtCore.QObject.connect(self.ui.ComboBox, QtCore.SIGNAL("currentIndexChanged(QString)"), self.getComboBox)
#End of GUI Class initialization
"""
_ ____ _ _
_ __ _ _ ___| |__ | __ ) _ _| |_| |_ ___ _ __
| '_ \| | | / __| '_ \| _ \| | | | __| __/ _ \| '_ \
| |_) | |_| \__ \ | | | |_) | |_| | |_| || (_) | | | |
| .__/ \__,_|___/_| |_|____/ \__,_|\__|\__\___/|_| |_|
|_|
"""
def theButtonIsPushed(self):
#=========================================================================================
# This method is called when the push button is clicked
#=========================================================================================
self.ui.nbPushed+=1
mess = "This button has been pushed %d time(s)" % self.ui.nbPushed
print mess
self.ui.dialogBox.setText(mess) #Shows the message in the GUI dialogbox
def theButtonPlotRandomIsPushed(self):
#=========================================================================================
# This method is called when the plot random image in window # is clicked
#=========================================================================================
winnum = self.ui.winNumber.value() # Retrieves the desired window number
pliInGui(np.random.rand(256,256), win=winnum) # Displays random array in the desired matplotlib embedded window
"""
_ _ ____ _ _
_ __ __ _ __| (_) ___ | __ ) _ _| |_| |_ ___ _ __
| '__/ _` |/ _` | |/ _ \| _ \| | | | __| __/ _ \| '_ \
| | | (_| | (_| | | (_) | |_) | |_| | |_| || (_) | | | |
|_| \__,_|\__,_|_|\___/|____/ \__,_|\__|\__\___/|_| |_|
"""
def radioButtonWasClicked(self):
#==============================================================================
# This method is Called when one of the radiobuttons are clicked
#==============================================================================
if(self.ui.radioButton_1.isChecked()):
mess = "No! God outstands. Eric does not count."
elif(self.ui.radioButton_2.isChecked()):
mess= "No! Even a master Jedi is not as good as him!"
elif(self.ui.radioButton_3.isChecked()):
mess= "Almost.... Fab is second in the list (will be 1st soon ;-) )"
elif(self.ui.radioButton_4.isChecked()):
mess= "Yes! Zozo = The best ;-)"
else:
mess="Oups I shoudn't be there..."
self.ui.dialogBox.setText(mess) #Shows the message in the GUI dialogbox
"""
____ _ _ ____ _ _
|___ \ ___| |_ __ _| |_ ___| __ ) _ _| |_| |_ ___ _ __
__) / __| __/ _` | __/ _ \ _ \| | | | __| __/ _ \| '_ \
/ __/\__ \ || (_| | || __/ |_) | |_| | |_| || (_) | | | |
|_____|___/\__\__,_|\__\___|____/ \__,_|\__|\__\___/|_| |_|
"""
def twoStateButtonIsPushed(self, pressed):
#==============================================================================
# This method is Called when the 2 state button is clicked
#==============================================================================
if(pressed):
self.ui.twoStateButtonStatus = "pushed" #if pressed we set the twoStateButtonStatus attribute to pushed
else:
self.ui.twoStateButtonStatus = "released" #if pressed we set the twoStateButtonStatus attribute to released
self.ui.twoStateButton.setText("2 state Button (%s)" % self.ui.twoStateButtonStatus) # update the label of the button with proper status
mess = "2 state buttton is now %s" % self.ui.twoStateButtonStatus
print mess
self.ui.dialogBox.setText( mess ) # displays message in dialogbox
"""
__ _ _ ____ _ _
/ _(_) | ___ / ___| ___| | ___ ___| |_ ___ _ __
| |_| | |/ _ \ \___ \ / _ \ |/ _ \/ __| __/ _ \| '__|
| _| | | __/ ___) | __/ | __/ (__| || (_) | |
|_| |_|_|\___| |____/ \___|_|\___|\___|\__\___/|_|
"""
def selectFITS(self):
#==============================================================================
# This method is called when the "load fits file"button is called
#==============================================================================
filepath = QtGui.QFileDialog.getOpenFileName( self, "Select FITS file", "./data/", "FITS files (*.fits);;All Files (*)") #Note: Use getOpenFileNames method (with a "s") to enable multiple file selection
print filepath
if(filepath!=''):
print (str(filepath))
data = pyfits.getdata(str(filepath)) # Load fits file using the pyfits library
pliInGui(data) # Displays the data in the GUI.
mess = filepath+" displayed in window 1"
else:
mess = "No File selected skipping..."
print mess
self.ui.dialogBox.setText(mess) # displays message
"""
_ ____
___ ___ _ __ ___ | |__ ___ | __ ) _____ __
/ __/ _ \| '_ ` _ \| '_ \ / _ \| _ \ / _ \ \/ /
| (_| (_) | | | | | | |_) | (_) | |_) | (_) > <
\___\___/|_| |_| |_|_.__/ \___/|____/ \___/_/\_\
"""
def theButtonOKIsClicked(self):
#==============================================================================
# This method is called when "ok" button is clicked
#==============================================================================
text = str(self.ui.textEdit.toPlainText()) #Get the text from the text edit Field entry
self.ui.ComboBox.addItem(text)# Adds the text in combo box. Note: Use currentText() to get the current Text in the combobox
mess = "Added Message: %s in Combobox" % text
print mess
self.ui.dialogBox.setText(mess) # prints some messages...
self.ui.nbItemsComboBox += 1 # updates the "nbItemsComboBox" attribute
self.ui.ComboBox.setCurrentIndex(self.ui.nbItemsComboBox-1) # sets the current item to the last one entered
def resetComboboxClicked(self):
#==============================================================================
# This method is called when "reset ComboBox" button is clicked
#==============================================================================
nb = self.ui.ComboBox.count() # retrieves the nb of items in the combo box
for i in range(nb):
            self.ui.ComboBox.removeItem(0) # removes the first item "nb" times => i.e. clears all items
        self.ui.nbItemsComboBox = 0 # updates the attribute
def getComboBox(self):
#==============================================================================
# This method is called when the combo selector is changed by the user
#==============================================================================
        currText=self.ui.ComboBox.currentText() #Retrieves the current text displayed in comboBox
mess = "ComboBox changed to: %s" % currText
self.ui.dialogBox.setText(mess) #displays message
"""
_ _ __ _ _
___ | |_| |__ ___ _ __ ___ / _|_ _ _ __ ___| |_(_) ___ _ __ ___
/ _ \| __| '_ \ / _ \ '__/ __| | |_| | | | '_ \ / __| __| |/ _ \| '_ \/ __|
| (_) | |_| | | | __/ | \__ \ | _| |_| | | | | (__| |_| | (_) | | | \__ \
\___/ \__|_| |_|\___|_| |___/ |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
_ __ _ _ _ _
__| | ___ / _(_)_ __ (_) |_(_) ___ _ __
/ _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \
| (_| | __/ _| | | | | | |_| | (_) | | | |
\__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|
"""
def pliInGui(data, color='gist_earth', win=1):
exec("wp.ui.window"+str(win)+".canvas.axes.clear()")
exec("wp.ui.window"+str(win)+".canvas.axes.matshow(data.transpose(), aspect='auto',cmap=color, origin='lower')")
exec("wp.ui.window"+str(win)+".canvas.draw()")
"""
_ _ _ _ ____ ____
| | __ _ _ _ _ __ ___| |__ (_)_ __ __ _ / \ | _ \| _ \
| |/ _` | | | | '_ \ / __| '_ \| | '_ \ / _` | / _ \ | |_) | |_) |
| | (_| | |_| | | | | (__| | | | | | | | (_| | / ___ \| __/| __/
|_|\__,_|\__,_|_| |_|\___|_| |_|_|_| |_|\__, | /_/ \_\_| |_|
|___/
"""
#==============================================================================================================
# !!!!! Here we launch the MAIN PyQt application !!!!!
#==============================================================================================================
app = QApplication([]) #Defines that the app is a Qt application
wp = demo_ui_class() # !!!!!!! THE GUI REALLY STARTS HERE !!!!!!
wp.show() # shows the GUI (can be hidden by typing wp.hide())
print "PyQt Demo loaded."
| gpl-2.0 |
pablocarderam/genetargeter | gRNAScores/azimuth/util.py | 1 | 13961 | import Bio.Seq as Seq
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
from scipy.stats.mstats import rankdata
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model.coordinate_descent import ElasticNet
from .load_data import combine_organisms
def get_thirty_one_mer_data():
"""
Load up our processed data file for all of V1 and V2, make a 31mer so that
we can use the SSC trained model to compare to
Assumes we call this from the analysis subdirectory
"""
myfile = r"..\data\FC_plus_RES_withPredictions.csv"
# was originally FC_RES_5304.csv but that isn't present
newfile = r"..\data\FC_plus_RES_withPredictions_w_31mer.csv"
data = pd.read_csv(myfile)
thirty_one_mer = []
for i in range(data.shape[0]):
thirty_one_mer.append(
convert_to_thirty_one(
data.iloc[i]["30mer"], data.iloc[i]["Target"], data.iloc[i]["Strand"]
)
)
data["31mer"] = thirty_one_mer
data.to_csv(newfile)
def convert_to_thirty_one(guide_seq, gene, strand):
"""
Given a guide sequence, a gene name, and strand (e.g. "sense"),
return a 31mer string which is our 30mer,
plus one more at the end.
"""
guide_seq = Seq.Seq(guide_seq)
gene_seq = Seq.Seq(get_gene_sequence(gene)).reverse_complement()
if strand == "sense":
guide_seq = guide_seq.reverse_complement()
ind = gene_seq.find(guide_seq)
if ind == -1:
print(
f"returning sequence+'A', could not find guide {guide_seq} in gene {gene}"
)
return gene_seq + "A"
if gene_seq[ind : (ind + len(guide_seq))] != guide_seq:
raise AssertionError("match not right")
new_mer = gene_seq[(ind - 1) : (ind + len(guide_seq))]
# this actually tacks on an extra one at the end for some reason
if strand == "sense":
new_mer = new_mer.reverse_complement()
return str(new_mer)
def concatenate_feature_sets(feature_sets, keys=None):
"""
Given a dictionary of sets of features, each in a pd.DataFrame,
concatenate them together to form one big np.array, and get the dimension
of each set
Returns: inputs, dim
"""
if feature_sets == {}:
raise AssertionError("no feature sets present")
if keys is None:
keys = list(feature_sets.keys())
F = feature_sets[keys[0]].shape[0]
for assemblage in feature_sets:
F2 = feature_sets[assemblage].shape[0]
if F != F2:
raise AssertionError(
f"not same # individuals for features {keys[0]} and {assemblage}"
)
N = feature_sets[keys[0]].shape[0]
inputs = np.zeros((N, 0))
feature_names = []
dim = {}
dimsum = 0
for assemblage in keys:
inputs_set = feature_sets[assemblage].values
dim[assemblage] = inputs_set.shape[1]
dimsum = dimsum + dim[assemblage]
inputs = np.hstack((inputs, inputs_set))
feature_names.extend(feature_sets[assemblage].columns.tolist())
return inputs, dim, dimsum, feature_names
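# A minimal usage sketch (illustrative only, with made-up feature names):
# two per-guide feature DataFrames with the same number of rows are stacked
# into one design matrix.
def _example_concatenate_feature_sets():
    feature_sets = {
        "gc": pd.DataFrame([[10], [12], [9]], columns=["gc_count"]),
        "pos": pd.DataFrame([[0.1, 20.0], [0.5, 50.0], [0.9, 90.0]],
                            columns=["cut_pos", "pct_pept"]),
    }
    inputs, dim, dimsum, names = concatenate_feature_sets(
        feature_sets, keys=["gc", "pos"])
    assert inputs.shape == (3, 3)
    assert dim == {"gc": 1, "pos": 2} and dimsum == 3
    assert names == ["gc_count", "cut_pos", "pct_pept"]
    return inputs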
def spearmanr_nonan(x, y):
"""
same as scipy.stats.spearmanr, but if all values are equal, returns 0 instead of nan
(Output: rho, pval)
"""
r, p = spearmanr(x, y)
r = np.nan_to_num(r)
p = np.nan_to_num(p)
return r, p
def impute_gene_position(gene_position):
"""
Some amino acid cut position and percent peptide are blank because of stop codons, but
we still want a number for these, so just set them to 101 as a proxy
"""
gene_position["Percent Peptide"] = gene_position["Percent Peptide"].fillna(101.00)
if "Amino Acid Cut position" in gene_position.columns:
gene_position["Amino Acid Cut position"] = gene_position[
"Amino Acid Cut position"
].fillna(gene_position["Amino Acid Cut position"].mean())
return gene_position
def get_gene_sequence(gene_name):
    gene_file = f"../../gene_sequences/{gene_name}_sequence.txt"
    try:
        with open(gene_file, "r") as f:
            seq = f.read()
        seq = seq.replace("\r\n", "").replace("\n", "")
    except (IOError, OSError):
        print(
            f"could not find gene sequence file {gene_file}, "
            f"please see examples and generate one for your gene "
            f"as needed, with this filename"
        )
        raise
    return seq
def get_ranks(y, thresh=0.8, prefix="", flip=False):
"""
y should be a DataFrame with one column
thresh is the threshold at which to call it a knock-down or not
col_name = 'score' is only for V2 data
flip should be FALSE for both V1 and V2!
"""
if prefix is not None:
prefix = prefix + "_"
# y_rank = y.apply(ranktrafo)
y_rank = y.apply(rankdata)
y_rank /= y_rank.max()
if flip:
y_rank = (
1.0 - y_rank
) # before this line, 1-labels where associated with low ranks, this flips it around
# (hence the y_rank > thresh below)
# we should NOT flip (V2), see README.txt in ./data
y_rank.columns = [prefix + "rank"]
y_threshold = (y_rank > thresh) * 1
y_threshold.columns = [prefix + "threshold"]
# JL: undo the log2 transform (not sure this matters?)
y_rank_raw = (2 ** y).apply(rankdata)
y_rank_raw /= y_rank_raw.max()
if flip:
y_rank_raw = 1.0 - y_rank_raw
y_rank_raw.columns = [prefix + "rank raw"]
if np.any(np.isnan(y_rank)):
raise AssertionError("found NaN in ranks")
y_quantized = y_threshold.copy()
y_quantized.columns = [prefix + "quantized"]
return y_rank, y_rank_raw, y_threshold, y_quantized
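# Illustrative call (made-up scores): rank four guides and threshold the top 20%.
def _example_get_ranks():
    y = pd.DataFrame({"score": [0.1, 0.5, 0.3, 0.9]})
    y_rank, y_rank_raw, y_threshold, y_quantized = get_ranks(y, thresh=0.8, prefix="demo")
    # y_rank has a single column 'demo_rank'; only the highest-scoring guide passes
    return y_rank, y_rank_raw, y_threshold, y_quantized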
def get_data(data, y_names, organism="human", target_gene=None):
"""
this is called once for each gene (aggregating across cell types)
y_names are cell types
e.g. call: X_CD13, Y_CD13 = get_data(cd13, y_names=['NB4 CD13', 'TF1 CD13'])
"""
outputs = pd.DataFrame()
# generate ranks for each cell type before aggregating to match what is in Doench et al
thresh = 0.8
for y_name in y_names: # for each cell type
y = pd.DataFrame(data[y_name])
# these thresholds/quantils are not used:
y_rank, y_rank_raw, y_threshold, _ = get_ranks(y, thresh=thresh, flip=False)
y_rank.columns = [y_name + " rank"]
y_rank_raw.columns = [y_name + " rank raw"]
y_threshold.columns = [y_name + " threshold"]
outputs = pd.concat([outputs, y, y_rank, y_threshold, y_rank_raw], axis=1)
# aggregated rank across cell types
average_activity = pd.DataFrame(outputs[[y_name for y_name in y_names]].mean(1))
average_activity.columns = ["average activity"]
average_rank_from_avg_activity = get_ranks(
average_activity, thresh=thresh, flip=False
)[0]
average_rank_from_avg_activity.columns = ["average_rank_from_avg_activity"]
average_threshold_from_avg_activity = (average_rank_from_avg_activity > thresh) * 1
average_threshold_from_avg_activity.columns = [
"average_threshold_from_avg_activity"
]
average_rank = pd.DataFrame(
outputs[[y_name + " rank" for y_name in y_names]].mean(1)
)
average_rank.columns = ["average rank"]
# higher ranks are better (when flip=False as it should be)
average_threshold = (average_rank > thresh) * 1
average_threshold.columns = ["average threshold"]
# undo the log2 trafo on the reads per million, apply rank trafo right away
average_rank_raw = pd.DataFrame(
outputs[[y_name + " rank raw" for y_name in y_names]].mean(1)
)
average_rank_raw.columns = ["average rank raw"]
outputs = pd.concat(
[
outputs,
average_rank,
average_threshold,
average_activity,
average_rank_raw,
average_rank_from_avg_activity,
average_threshold_from_avg_activity,
],
axis=1,
)
# import pdb; pdb.set_trace()
# sequence-specific computations
# features = featurize_data(data)
# strip out featurization to later
features = pd.DataFrame(data["30mer"])
if organism == "human":
target_gene = y_names[0].split(" ")[1]
outputs["Target gene"] = target_gene
outputs["Organism"] = organism
features["Target gene"] = target_gene
features["Organism"] = organism
features["Strand"] = pd.DataFrame(data["Strand"])
return features, outputs
def extract_feature_from_model(method, results, split):
model_type = results[method][3][split]
if isinstance(model_type, ElasticNet):
tmp_imp = results[method][3][split].coef_[:, None]
elif isinstance(model_type, GradientBoostingRegressor):
tmp_imp = results[method][3][split].feature_importances_[:, None]
else:
raise Exception(f"need to add model {model_type} to feature extraction")
return tmp_imp
def extract_feature_from_model_sum(method, results, split, indexes):
model_type = results[method][3][split]
if isinstance(model_type, ElasticNet):
tmp_imp = np.sum(results[method][3][split].coef_[indexes])
elif isinstance(model_type, GradientBoostingRegressor):
tmp_imp = np.sum(results[method][3][split].feature_importances_[indexes])
else:
raise Exception(f"need to add model {model_type} to feature extraction")
return tmp_imp
def feature_importances(results):
for method in results:
feature_names = results[method][6]
        seen = set()
        duplicates = set()
        for ft in feature_names:
            if ft not in seen:
                seen.add(ft)
            else:
                duplicates.add(ft)
        if duplicates:
            raise Exception(f"feature name appears more than once: {duplicates}")
pd_order1, pi_order1, pd_order2, pi_order2, nggx = [], [], [], [], []
for i, s in enumerate(feature_names):
if "False" in s:
continue
elif "_" in s:
nucl, _ = s.split("_")
if len(nucl) == 1:
pd_order1.append(i)
elif len(nucl) == 2:
pd_order2.append(i)
elif "NGGX_pd.Order2" in s:
nggx.append(i)
else:
nucl = s
if len(nucl) == 1:
pi_order1.append(i)
elif len(nucl) == 2:
pi_order2.append(i)
grouped_feat = {
"pd_order2": pd_order2,
"pi_order2": pi_order2,
"pd_order1": pd_order1,
"pi_order1": pi_order1,
"NGGX_pd.Order2": nggx,
}
        grouped_feat_ind = [i for indices in grouped_feat.values() for i in indices]
        remaining_features_ind = set.difference(
            set(range(len(feature_names))), set(grouped_feat_ind)
        )
for i in remaining_features_ind:
grouped_feat[feature_names[i]] = [i]
feature_importances_grouped = {}
for k in grouped_feat:
if not grouped_feat[k]:
continue
else:
for split in results[method][3]:
split_feat_importance = extract_feature_from_model_sum(
method, results, split, grouped_feat[k]
)
if k not in feature_importances_grouped:
feature_importances_grouped[k] = [split_feat_importance]
else:
feature_importances_grouped[k].append(split_feat_importance)
all_split_importances = None
for split in results[method][3]:
split_feat_importance = extract_feature_from_model(method, results, split)
if all_split_importances is None:
all_split_importances = split_feat_importance.copy()
else:
all_split_importances = np.append(
all_split_importances, split_feat_importance, axis=1
)
avg_importance = np.mean(all_split_importances, axis=1)[:, None]
std_importance = np.std(all_split_importances, axis=1)[:, None]
imp_array = np.concatenate(
(np.array(feature_names)[:, None], avg_importance, std_importance), axis=1
)
df = pd.DataFrame(
data=imp_array,
columns=["Feature name", "Mean feature importance", "Std. Dev."],
)
        df["Mean feature importance"] = pd.to_numeric(df["Mean feature importance"])
        df["Std. Dev."] = pd.to_numeric(df["Std. Dev."])
feature_dictionary = {
"pd_order2": "position dep. order 2 ",
"pd_order1": "position dep. order 1 ",
"pi_order1": "position ind. order 1 ",
"pi_order2": "position ind. order 2 ",
"5mer_end_False": "Tm (5mer end)",
"5mer_start_False": "Tm (5mer start)",
"Amino Acid Cut position": "amino acid cut position ",
"8mer_middle_False": "Tm (8mer middle)",
"NGGX_pd.Order2": "NGGN interaction ",
"Tm global_False": "Tm (30mer)",
"Percent Peptide": "percent peptide ",
}
        for i in range(df.shape[0]):
            thisfeat = df["Feature name"].iloc[i]
            if thisfeat in feature_dictionary:
                df.loc[df.index[i], "Feature name"] = feature_dictionary[thisfeat]
return df
if __name__ == "__main__":
# get_thirty_one_mer_data()
V = "1"
if V == "1":
        HUMAN_DATA = pd.read_excel("data/V1_data.xlsx", sheet_name=0, index_col=[0, 1])
        MOUSE_DATA = pd.read_excel("data/V1_data.xlsx", sheet_name=1, index_col=[0, 1])
X, Y = combine_organisms(HUMAN_DATA, MOUSE_DATA)
X.to_pickle("../data/X.pd") # sequence features (i.e. inputs to prediction)
Y.to_pickle(
"../data/Y.pd"
) # cell-averaged ranks, plus more (i.e. possible targets for prediction)
print("done writing to file")
elif V == "2":
# this is now all in predict.py
pass
elif V == "0":
pass
| mit |
dilawar/moose-full | moose-examples/paper-2015/Fig4_ReacDiff/rxdSpineSize.py | 6 | 11226 | ##################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
##
## rxdSpineSize.py: Builds a cell with spines and a propagating reaction
## wave. Products diffuse into the spine and cause it to get bigger.
##################################################################
import math
import pylab
import numpy
import matplotlib.pyplot as plt
import moose
import sys
sys.path.append( '../util' )
import rdesigneur as rd
from PyQt4 import QtGui
import moogli
import moogli.extensions.moose
import matplotlib
PI = 3.141592653
ScalingForTesting = 10
RM = 1.0 / ScalingForTesting
RA = 1.0 * ScalingForTesting
CM = 0.01 * ScalingForTesting
runtime = 100.0
frameruntime = 1.0
diffConst = 5e-12
dendLen = 100e-6
diffLen = 1.0e-6
dendDia = 2e-6
somaDia = 5e-6
concInit = 0.001 # 1 millimolar
spineSpacing = 10e-6
spineSpacingDistrib = 1e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle= numpy.pi / 2.0
spineAngleDistrib = 0.0
def makeCellProto( name ):
elec = moose.Neuron( '/library/' + name )
ecompt = []
soma = rd.buildCompt( elec, 'soma', somaDia, somaDia, -somaDia, RM, RA, CM )
dend = rd.buildCompt( elec, 'dend', dendLen, dendDia, 0, RM, RA, CM )
moose.connect( soma, 'axial', dend, 'raxial' )
elec.buildSegmentTree()
def makeChemProto( name ):
chem = moose.Neutral( '/library/' + name )
comptVol = diffLen * dendDia * dendDia * PI / 4.0
for i in ( ['dend', comptVol], ['spine', 1e-19], ['psd', 1e-20] ):
print 'making ', i
compt = moose.CubeMesh( chem.path + '/' + i[0] )
compt.volume = i[1]
#x = moose.Pool( compt.path + '/x' )
#y = moose.BufPool( compt.path + '/y' )
z = moose.Pool( compt.path + '/z' )
#x.concInit = 0.0
#x.diffConst = diffConst
#y.concInit = concInit
z.concInit = 0.0
z.diffConst = diffConst
nInit = comptVol * 6e23 * concInit
nstr = str( 1/nInit)
x = moose.Pool( chem.path + '/dend/x' )
x.diffConst = diffConst
func = moose.Function( x.path + '/func' )
func.expr = "-x0 * (0.3 - " + nstr + " * x0) * ( 1 - " + nstr + " * x0)"
print func.expr
func.x.num = 1
moose.connect( x, 'nOut', func.x[0], 'input' )
moose.connect( func, 'valueOut', x, 'increment' )
z = moose.element( '/library/' + name + '/dend/z' )
reac = moose.Reac( '/library/' + name + '/dend/reac' )
reac.Kf = 1
reac.Kb = 10
moose.connect( reac, 'sub', x, 'reac' )
moose.connect( reac, 'prd', z, 'reac' )
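# Quick numeric check of the rate law used for x above (plain numpy, no MOOSE needed):
# the expression -x*(0.3 - x/nInit)*(1 - x/nInit) is a bistable cubic whose roots at
# x = 0, 0.3*nInit and nInit are the fixed points that support the propagating wave.
# This helper is only an illustrative sketch; nInit = 1.0 is a stand-in value.
def sketchRateLaw( nInit = 1.0 ):
    x = numpy.linspace( 0, nInit, 5 )
    return -x * ( 0.3 - x / nInit ) * ( 1 - x / nInit )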
def makeSpineProto2( name ):
spine = moose.Neutral( '/library/' + name )
shaft = rd.buildCompt( spine, 'shaft', 0.5e-6, 0.4e-6, 0, RM, RA, CM )
head = rd.buildCompt( spine, 'head', 0.5e-6, 0.5e-6, 0.5e-6, RM, RA, CM )
moose.connect( shaft, 'axial', head, 'raxial' )
def makeModel():
moose.Neutral( '/library' )
makeCellProto( 'cellProto' )
makeChemProto( 'cProto' )
makeSpineProto2( 'spine' )
rdes = rd.rdesigneur( useGssa = False, \
combineSegments = False, \
stealCellFromLibrary = True, \
meshLambda = 1e-6, \
cellProto = [['cellProto', 'elec' ]] ,\
spineProto = [['spineProto', 'spine' ]] ,\
chemProto = [['cProto', 'chem' ]] ,\
spineDistrib = [ \
['spine', '#', \
'spacing', str( spineSpacing ), \
'spacingDistrib', str( spineSpacingDistrib ), \
'angle', str( spineAngle ), \
'angleDistrib', str( spineAngleDistrib ), \
'size', str( spineSize ), \
'sizeDistrib', str( spineSizeDistrib ) ] \
], \
chemDistrib = [ \
[ "chem", "dend", "install", "1" ] \
],
adaptorList = [ \
[ 'psd/z', 'n', 'spine', 'psdArea', 10.0e-15, 300e-15 ], \
] \
)
rdes.buildModel( '/model' )
x = moose.vec( '/model/chem/dend/x' )
x.concInit = 0.0
for i in range( 0,20 ):
x[i].concInit = concInit
def makePlot( name, srcVec, field ):
tab = moose.Table2('/graphs/' + name + 'Tab', len( srcVec ) ).vec
for i in zip(srcVec, tab):
moose.connect(i[1], 'requestOut', i[0], field)
return tab
def displayPlots():
for x in moose.wildcardFind( '/graphs/#[0]' ):
tab = moose.vec( x )
for i in range( len( tab ) ):
pylab.plot( tab[i].vector, label=x.name[:-3] + " " + str( i ) )
pylab.legend()
pylab.figure()
def main():
"""
This illustrates the use of rdesigneur to build a simple dendrite with
spines, and then to resize them using spine fields. These are the
fields that would be changed dynamically in a simulation with reactions
that affect spine geometry.
In this simulation there is a propagating reaction wave using a
highly abstracted equation, whose product diffuses into the spines and
makes them bigger.
"""
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
makePlot( 'headDia', eHead, 'getDiameter' )
'''
debug = moose.PyRun( '/pyrun' )
debug.tick = 10
debug.runString = """print "RUNNING: ", moose.element( '/model/chem/psd/z' ).n, moose.element( '/model/elec/head0' ).diameter"""
'''
moose.reinit()
moose.start( runtime )
displayPlots()
pylab.plot( oldDia, label = 'old Diameter' )
pylab.plot( [ i.diameter for i in eHead ], label = 'new Diameter' )
pylab.legend()
pylab.show()
app = QtGui.QApplication(sys.argv)
#widget = mv.MoogliViewer( '/model' )
morphology = moogli.read_morphology_from_moose( name="", path = '/model/elec' )
widget = moogli.MorphologyViewerWidget( morphology )
widget.show()
return app.exec_()
quit()
# Run the 'main' if this script is executed standalone.
def showVisualization():
makeModel()
elec = moose.element( '/model/elec' )
elec.setSpineAndPsdMesh( moose.element('/model/chem/spine'), moose.element('/model/chem/psd') )
eHead = moose.wildcardFind( '/model/elec/#head#' )
oldDia = [ i.diameter for i in eHead ]
graphs = moose.Neutral( '/graphs' )
#makePlot( 'psd_x', moose.vec( '/model/chem/psd/x' ), 'getN' )
#makePlot( 'head_x', moose.vec( '/model/chem/spine/x' ), 'getN' )
makePlot( 'dend_x', moose.vec( '/model/chem/dend/x' ), 'getN' )
dendZ = makePlot( 'dend_z', moose.vec( '/model/chem/dend/z' ), 'getN' )
makePlot( 'head_z', moose.vec( '/model/chem/spine/z' ), 'getN' )
psdZ = makePlot( 'psd_z', moose.vec( '/model/chem/psd/z' ), 'getN' )
diaTab = makePlot( 'headDia', eHead, 'getDiameter' )
# print diaTab[0].vector[-1]
# return
dendrite = moose.element("/model/elec/dend")
dendrites = [dendrite.path + "/" + str(i) for i in range(len(dendZ))]
# print dendrites
moose.reinit()
spineHeads = moose.wildcardFind( '/model/elec/#head#')
# print moose.wildcardFind( '/model/elec/##')
# print "dendZ", readValues(dendZ)
# print dendrite
app = QtGui.QApplication(sys.argv)
viewer = create_viewer("/model/elec", dendrite, dendZ, diaTab, psdZ)
viewer.showMaximized()
viewer.start()
return app.exec_()
def create_viewer(path, moose_dendrite, dendZ, diaTab, psdZ):
network = moogli.extensions.moose.read(path=path,
vertices=15)
dendrite = network.groups["dendrite"].shapes[moose_dendrite.path]
chem_compt_group = dendrite.subdivide(len(dendZ))
normalizer = moogli.utilities.normalizer(0.0,
300.0,
clipleft=True,
clipright=True)
colormap = moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow)
mapper = moogli.utilities.mapper(colormap, normalizer)
def readValues(tables):
values = []
for i in range(len(tables)):
values.append(tables[i].vector[-1])
return values
def prelude(view):
view.home()
view.pitch(math.pi / 3.0)
view.zoom(0.3)
network.groups["soma"].set("color", moogli.colors.RED)
network.groups["spine"].groups["shaft"].set("color",
moogli.colors.RED)
def interlude(view):
moose.start(frameruntime)
network.groups["spine"].groups["head"].set("radius",
readValues(diaTab),
lambda x: x * 0.5e6)
network.groups["spine"].groups["head"].set("color",
readValues(psdZ),
mapper)
chem_compt_group.set("color",
readValues(dendZ),
mapper)
if moose.element("/clock").currentTime >= runtime:
view.stop()
viewer = moogli.Viewer("Viewer")
viewer.attach_shapes(network.shapes.values())
viewer.detach_shape(dendrite)
viewer.attach_shapes(chem_compt_group.shapes.values())
view = moogli.View("main-view",
prelude=prelude,
interlude=interlude)
cb = moogli.widgets.ColorBar(id="cb",
title="Molecule #",
text_color=moogli.colors.BLACK,
position=moogli.geometry.Vec3f(0.975, 0.5, 0.0),
size=moogli.geometry.Vec3f(0.30, 0.05, 0.0),
text_font="/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-R.ttf",
orientation=math.pi / 2.0,
text_character_size=20,
label_formatting_precision=0,
colormap=moogli.colors.MatplotlibColorMap(matplotlib.cm.rainbow),
color_resolution=100,
scalar_range=moogli.geometry.Vec2f(0.0,
300.0))
view.attach_color_bar(cb)
viewer.attach_view(view)
return viewer
if __name__ == '__main__':
showVisualization()
| gpl-2.0 |
seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
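# Illustrative call (hypothetical shapes): 1000 samples of 10 features with integer
# labels over 3 classes, batched in chunks of 32.
def _example_get_in_out_shape():
  input_shape, output_shape, batch_size = _get_in_out_shape(
      (1000, 10), (1000,), 3, batch_size=32)
  # input_shape == [32, 10], output_shape == [32, 3], batch_size == 32
  return input_shape, output_shape, batch_size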
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or iterable which returns dict), `n_classes[key]` is the
      number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
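# Minimal sketch of typical usage (random data, purely illustrative): in-memory numpy
# arrays produce a plain DataFeeder with one-hot output shape [batch_size, n_classes].
def _example_setup_train_data_feeder():
  x = np.random.rand(100, 5).astype(np.float32)
  y = np.random.randint(0, 3, size=100)
  feeder = setup_train_data_feeder(x, y, n_classes=3, batch_size=16)
  return feeder.input_shape, feeder.output_shape  # [16, 5] and [16, 3]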
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
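# Illustrative use of the batching generator (toy stream of 1-element arrays):
# it yields np.matrix chunks of `batch_size` rows plus a final partial chunk,
# so the shapes come out as [(2, 1), (2, 1), (1, 1)] here.
def _example_batch_data():
  stream = iter([np.array([float(i)]) for i in range(5)])
  return [chunk.shape for chunk in _batch_data(stream, batch_size=2)]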
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
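# Illustrative call (toy data): a 1-D array is reshaped to a column and split into
# batches of at most 4 rows.
def _example_setup_predict_data_feeder():
  x = np.arange(10, dtype=np.float32)
  batches = setup_predict_data_feeder(x, batch_size=4)
  return [b.shape for b in batches]  # [(4, 1), (4, 1), (2, 1)]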
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
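# Small sketch (toy values) of integer-location access working the same way for
# numpy arrays as for pandas objects.
def _example_access():
  arr = np.array([10, 20, 30, 40])
  return _access(arr, 2), _access(arr, np.array([0, 3]))  # 30 and array([10, 40])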
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows to read data as it comes it from disk or
somewhere else. It's custom to have this iterators rotate infinetly over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set
`None`, then assumes that iterator to return already batched element.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate so use a
int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
Event38/MissionPlanner | Lib/site-packages/scipy/signal/ltisys.py | 53 | 23848 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
#
from filter_design import tf2zpk, zpk2tf, normalize
import numpy
from numpy import product, zeros, array, dot, transpose, ones, \
nan_to_num, zeros_like, linspace
import scipy.interpolate as interpolate
import scipy.integrate as integrate
import scipy.linalg as linalg
from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \
squeeze, diag, asarray
def tf2ss(num, den):
"""Transfer function to state-space representation.
Parameters
----------
num, den : array_like
Sequences representing the numerator and denominator
polynomials.
Returns
-------
A, B, C, D : ndarray
State space representation of the system.
"""
# Controller canonical state-space representation.
# if M+1 = len(num) and K+1 = len(den) then we must have M <= K
# states are found by asserting that X(s) = U(s) / D(s)
# then Y(s) = N(s) * X(s)
#
# A, B, C, and D follow quite naturally.
#
num, den = normalize(num, den) # Strips zeros, checks arrays
nn = len(num.shape)
if nn == 1:
num = asarray([num], num.dtype)
M = num.shape[1]
K = len(den)
if (M > K):
raise ValueError("Improper transfer function.")
if (M == 0 or K == 0): # Null system
return array([],float), array([], float), array([], float), \
array([], float)
# pad numerator to have same number of columns has denominator
num = r_['-1',zeros((num.shape[0],K-M), num.dtype), num]
if num.shape[-1] > 0:
D = num[:,0]
else:
D = array([],float)
if K == 1:
return array([], float), array([], float), array([], float), D
frow = -array([den[1:]])
A = r_[frow, eye(K-2, K-1)]
B = eye(K-1, 1)
C = num[:,1:] - num[:,0] * den[1:]
return A, B, C, D
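# Worked sketch (illustrative numbers): the transfer function
#     H(s) = (s + 3) / (s**2 + 3*s + 2)
# in controller canonical form gives A = [[-3, -2], [1, 0]], B = [[1], [0]],
# C = [[1, 3]], D = [0].
def _example_tf2ss():
    num = [1.0, 3.0]
    den = [1.0, 3.0, 2.0]
    return tf2ss(num, den)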
def _none_to_empty(arg):
if arg is None:
return []
else:
return arg
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are rank-2.
"""
A, B, C, D = map(_none_to_empty, (A, B, C, D))
A, B, C, D = map(atleast_2d, (A, B, C, D))
if ((len(A.shape) > 2) or (len(B.shape) > 2) or \
(len(C.shape) > 2) or (len(D.shape) > 2)):
raise ValueError("A, B, C, D arrays can be no larger than rank-2.")
MA, NA = A.shape
MB, NB = B.shape
MC, NC = C.shape
MD, ND = D.shape
if (MC == 0) and (NC == 0) and (MD != 0) and (NA != 0):
MC, NC = MD, NA
C = zeros((MC, NC))
if (MB == 0) and (NB == 0) and (MA != 0) and (ND != 0):
MB, NB = MA, ND
        B = zeros((MB, NB))
if (MD == 0) and (ND == 0) and (MC != 0) and (NB != 0):
MD, ND = MC, NB
        D = zeros((MD, ND))
if (MA == 0) and (NA == 0) and (MB != 0) and (NC != 0):
MA, NA = MB, NC
        A = zeros((MA, NA))
if MA != NA:
raise ValueError("A must be square.")
if MA != MB:
raise ValueError("A and B must have the same number of rows.")
if NA != NC:
raise ValueError("A and C must have the same number of columns.")
if MD != MC:
raise ValueError("C and D must have the same number of rows.")
if ND != NB:
raise ValueError("B and D must have the same number of columns.")
return A, B, C, D
def ss2tf(A, B, C, D, input=0):
"""State-space to transfer function.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
num, den : 1D ndarray
Numerator and denominator polynomials (as sequences)
respectively.
"""
# transfer function is C (sI - A)**(-1) B + D
A, B, C, D = map(asarray, (A, B, C, D))
# Check consistency and
# make them all rank-2 arrays
A, B, C, D = abcd_normalize(A, B, C, D)
nout, nin = D.shape
if input >= nin:
raise ValueError("System does not have the input specified.")
# make MOSI from possibly MOMI system.
if B.shape[-1] != 0:
B = B[:,input]
B.shape = (B.shape[0],1)
if D.shape[-1] != 0:
D = D[:,input]
try:
den = poly(A)
except ValueError:
den = 1
if (product(B.shape,axis=0) == 0) and (product(C.shape,axis=0) == 0):
num = numpy.ravel(D)
if (product(D.shape,axis=0) == 0) and (product(A.shape,axis=0) == 0):
den = []
return num, den
num_states = A.shape[0]
type_test = A[:,0] + B[:,0] + C[0,:] + D
num = numpy.zeros((nout, num_states+1), type_test.dtype)
for k in range(nout):
Ck = atleast_2d(C[k,:])
num[k] = poly(A - dot(B,Ck)) + (D[k]-1)*den
return num, den
def zpk2ss(z, p, k):
"""Zero-pole-gain representation to state-space representation
Parameters
----------
z, p : sequence
Zeros and poles.
k : float
System gain.
Returns
-------
A, B, C, D : ndarray
State-space matrices.
"""
return tf2ss(*zpk2tf(z,p,k))
def ss2zpk(A, B, C, D, input=0):
"""State-space representation to zero-pole-gain representation.
Parameters
----------
A, B, C, D : ndarray
State-space representation of linear system.
input : int, optional
For multiple-input systems, the input to use.
Returns
-------
z, p : sequence
Zeros and poles.
k : float
System gain.
"""
return tf2zpk(*ss2tf(A,B,C,D,input=input))
class lti(object):
"""Linear Time Invariant class which simplifies representation.
"""
def __init__(self,*args,**kwords):
"""Initialize the LTI system using either:
(numerator, denominator)
(zeros, poles, gain)
(A, B, C, D) -- state-space.
"""
N = len(args)
if N == 2: # Numerator denominator transfer function input
self.__dict__['num'], self.__dict__['den'] = normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = tf2zpk(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = tf2ss(*args)
self.inputs = 1
if len(self.num.shape) > 1:
self.outputs = self.num.shape[0]
else:
self.outputs = 1
elif N == 3: # Zero-pole-gain form
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = args
self.__dict__['num'], self.__dict__['den'] = zpk2tf(*args)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = zpk2ss(*args)
self.inputs = 1
if len(self.zeros.shape) > 1:
self.outputs = self.zeros.shape[0]
else:
self.outputs = 1
elif N == 4: # State-space form
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = abcd_normalize(*args)
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = ss2zpk(*args)
self.__dict__['num'], self.__dict__['den'] = ss2tf(*args)
self.inputs = self.B.shape[-1]
self.outputs = self.C.shape[0]
else:
raise ValueError("Needs 2, 3, or 4 arguments.")
def __setattr__(self, attr, val):
if attr in ['num','den']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
tf2zpk(self.num, self.den)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
tf2ss(self.num, self.den)
elif attr in ['zeros', 'poles', 'gain']:
self.__dict__[attr] = val
self.__dict__['num'], self.__dict__['den'] = \
zpk2tf(self.zeros,
self.poles, self.gain)
self.__dict__['A'], self.__dict__['B'], \
self.__dict__['C'], \
self.__dict__['D'] = \
zpk2ss(self.zeros,
self.poles, self.gain)
elif attr in ['A', 'B', 'C', 'D']:
self.__dict__[attr] = val
self.__dict__['zeros'], self.__dict__['poles'], \
self.__dict__['gain'] = \
ss2zpk(self.A, self.B,
self.C, self.D)
self.__dict__['num'], self.__dict__['den'] = \
ss2tf(self.A, self.B,
self.C, self.D)
else:
self.__dict__[attr] = val
def impulse(self, X0=None, T=None, N=None):
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
return lsim(self, U, T, X0=X0)
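# Illustrative sketch: the same system entered in transfer-function form and in
# zero-pole-gain form; both describe H(s) = (s + 3) / ((s + 1) * (s + 2)).
def _example_lti():
    sys_tf = lti([1.0, 3.0], [1.0, 3.0, 2.0])
    sys_zpk = lti(array([-3.0]), array([-1.0, -2.0]), 1.0)
    return sys_tf.poles, sys_zpk.num, sys_zpk.den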
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D or 2D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
odeint. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses :func:`scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for :func:`scipy.integrate.odeint` for the full list of arguments.
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
X0 = zeros(sys.B.shape[0],sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1,1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A,x) + squeeze(dot(sys.B,nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C,transpose(xout)) + dot(sys.D,transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A,x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C,transpose(xout))
return T, squeeze(transpose(yout)), xout
def lsim(system, U, T, X0=None, interp=1):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input.
T : array_like
The time steps at which the input is defined and at which the
output is desired.
X0 :
The initial conditions on the state vector (zero by default).
interp : {1, 0}
Whether to use linear (1) or zero-order hold (0) interpolation.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time-evolution of the state-vector.
"""
# system is an lti system or a sequence
# with 2 (num, den)
# 3 (zeros, poles, gain)
# 4 (A, B, C, D)
# describing the system
# U is an input vector at times T
# if system describes multiple inputs
# then U can be a rank-2 array with the number of columns
# being the number of inputs
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
U = atleast_1d(U)
T = atleast_1d(T)
if len(U.shape) == 1:
U = U.reshape((U.shape[0],1))
sU = U.shape
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("System does not define that many inputs.")
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
xout = zeros((len(T),sys.B.shape[0]), sys.A.dtype)
xout[0] = X0
A = sys.A
AT, BT = transpose(sys.A), transpose(sys.B)
dt = T[1]-T[0]
lam, v = linalg.eig(A)
vt = transpose(v)
vti = linalg.inv(vt)
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
ATm1 = linalg.inv(AT)
ATm2 = dot(ATm1,ATm1)
I = eye(A.shape[0],dtype=A.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
for k in xrange(1,len(T)):
dt1 = T[k] - T[k-1]
if dt1 != dt:
dt = dt1
GT = dot(dot(vti,diag(numpy.exp(dt*lam))),vt).astype(xout.dtype)
GTmI = GT-I
F1T = dot(dot(BT,GTmI),ATm1)
if interp:
F2T = dot(BT,dot(GTmI,ATm2)/dt - ATm1)
xout[k] = dot(xout[k-1],GT) + dot(U[k-1],F1T)
if interp:
xout[k] = xout[k] + dot((U[k]-U[k-1]),F2T)
yout = squeeze(dot(U,transpose(sys.D))) + squeeze(dot(xout,transpose(sys.C)))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
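Notes
-----
The interval is a heuristic based on the slowest pole of `A`:
``tc = 1.0 / r`` with ``r = min(abs(real(eigvals(A))))`` (``r`` falls
back to 1.0 when that minimum is zero), and the samples span
``[0, 7 * tc]``.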
"""
# Create a reasonable time interval. This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7*tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : LTI class or tuple
If specified as a tuple, the system is described as
``(num, den)``, ``(zero, pole, gain)``, or ``(A, B, C, D)``.
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
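Examples
--------
A minimal sketch using an illustrative second-order system:
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse(system)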
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if X0 is None:
B = sys.B
else:
B = sys.B + X0
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
h = zeros(T.shape, sys.A.dtype)
s,v = linalg.eig(sys.A)
vi = linalg.inv(v)
C = sys.C
for k in range(len(h)):
es = diag(numpy.exp(s*T[k]))
eA = (dot(dot(v,es),vi)).astype(h.dtype)
h[k] = squeeze(dot(dot(C,eA),B))
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
U = zeros_like(T)
ic = B + X0
Tr, Yr, Xr = lsim2(sys, U, T, ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
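Examples
--------
A minimal sketch using an illustrative second-order system:
>>> t, y = step(([1.0], [1.0, 2.0, 1.0]))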
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation.
2 (num, den)
3 (zeros, poles, gain)
4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int
Number of time points to compute if `T` is not given.
**kwargs :
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
:func:`scipy.integrate.odeint`. See the documentation for
:func:`scipy.integrate.odeint` for information about these
arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
.. versionadded:: 0.8.0
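Examples
--------
A minimal sketch; the system and the `rtol` value passed through to
:func:`scipy.integrate.odeint` are illustrative only:
>>> t, y = step2(([1.0], [1.0, 2.0, 1.0]), rtol=1e-9)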
"""
if isinstance(system, lti):
sys = system
else:
sys = lti(*system)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
| gpl-3.0 |
patemotter/trilinos-prediction | ml_files/preprocess_timing_data.py | 1 | 4348 | # This code is designed to read in the timing information from solver/prec + matrix.
# The system name as well as the solver/prec are replaced with numerical IDs.
# We then find the best (fastest) converged time for each matrix and flag each run as good or bad.
# Input:
# Timings files should be csv w/ no space
# Timings cols = num_procs,matrix_name,solver,preconditioner,status,time,iterations,final_residual
# Timing file examples can be found within the trilinos-prediction/data directory
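# An illustrative (made-up) example row in that format; note that the loader
# below also expects a leading system-name column:
# bridges,28,audikw_1,BICGSTAB,ILUT,converged,12.34,57,1.2e-09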
import pandas as pd
import numpy as np
import os
cwd = os.getcwd()
# Read in the timings from each csv file, aliases for cols
timings = list()
print(cwd)
timings.append(pd.read_csv('../system_runtimes/bridges/bridges_np28_omp1_timings.csv', header=0))
#timings.append(pd.read_csv('../system_runtimes/comet/comet_np28_omp1_timings.csv', header=0))
#timings.append(pd.read_csv('../system_runtimes/stampede/stampede_np16_omp1_timings.csv', header=0))
#timings.append(pd.read_csv('../system_runtimes/summit/summit_np28_omp1_timings.csv', header=0))
# Make a list of all the individual np dataframes and combine them
all_timing_data = pd.concat(timings, ignore_index=True)
all_timing_data.columns = ['system', 'np', 'matrix', 'solver', 'prec', 'status', 'time', 'iters', 'resid']
# Change string entries into numerical (for SKLearn)
all_timing_data['system_id'] = all_timing_data.system.map(
{'janus': 0, 'bridges': 1, 'comet': 2, 'summit': 3, 'stampede': 4}).astype(int)
all_timing_data['solver_id'] = all_timing_data.solver.map(
{'FIXED_POINT': 0, 'BICGSTAB': 1, 'MINRES': 2, 'PSEUDOBLOCK_CG': 3, 'PSEUDOBLOCK_STOCHASTIC_CG': 4,
'PSEUDOBLOCK_TFQMR': 5, 'TFQMR': 6, 'LSQR': 7, 'PSEUDOBLOCK_GMRES': 8}).astype(int)
all_timing_data['prec_id'] = all_timing_data.prec.map({'ILUT': 0, 'RILUK': 1, 'RELAXATION': 2, 'CHEBYSHEV': 3,
'NONE': 4}).astype(int)
all_timing_data['status_id'] = all_timing_data.status.map({'error': -1, 'unconverged': 0, 'converged': 1}).astype(int)
# Group based on the matrix, find the best times for each matrix (error, unconverged, converged)
grouped = all_timing_data.groupby(['matrix', 'status_id'])
matrix_best_times = grouped['time'].aggregate(np.min)
# Create two empty lists that will be new columns
good_bad_list = []
new_time_list = []
hash_list = []
hash_dict = {}
matrix_names = all_timing_data['matrix'].unique()
for name in matrix_names:
hash_dict[name] = hash(name)
# Iterate through each row of the dataframe
subset = all_timing_data[['time', 'matrix', 'status_id']]
max_float_value = np.finfo(np.float32).max
for index, row in subset.iterrows():
current_matrix_time = row['time']
matrix_name = row['matrix']
hash_list.append(hash_dict[matrix_name])
# Check for matrices which never converged
try:
matrix_min_time = matrix_best_times[matrix_name][1] # 1 indicates converged
except KeyError:
matrix_min_time = np.inf
# Error or unconverged runs are assigned an infinite time
if row['status_id'] != 1:
good_bad_list.append(-1)
new_time_list.append(np.inf)
# Good = anything within 25% of the fastest run for that matrix
elif current_matrix_time <= 1.25 * matrix_min_time:
good_bad_list.append(1)
new_time_list.append(current_matrix_time)
# Bad = anything else outside of that range but still converged
else:
good_bad_list.append(-1)
new_time_list.append(current_matrix_time)
# Create Pandas series from the lists which used to contain strings
good_bad_series = pd.Series(good_bad_list)
new_time_series = pd.Series(new_time_list)
name_hash_series = pd.Series(hash_list)
# Add the series to the dataframe as columns
all_timing_data = all_timing_data.assign(good_or_bad=pd.Series(good_bad_series))
all_timing_data = all_timing_data.assign(new_time=pd.Series(new_time_series))
all_timing_data = all_timing_data.assign(matrix_id=pd.Series(name_hash_series))
# Select which columns to keep and output to file
all_timing_data.to_csv('combined_np28_timings.csv')
#cleaned_timing_data = all_timing_data[['system_id', 'numprocs', 'matrix', 'matrix_id', 'solver_id', 'prec_id',
# 'status_id', 'new_time', 'good_or_bad']]
#cleaned_timing_data.to_csv('janus_processed_timings.csv')
| mit |
bbalasub1/glmnet_python | test/example_mgaussian.py | 2 | 1404 | # Import relevant modules and setup for calling glmnet
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy
import importlib
import matplotlib.pyplot as plt
import time
import glmnet
from glmnetPlot import glmnetPlot
import glmnetPrint
import glmnetCoef
import glmnetPredict
import cvglmnet
import cvglmnetCoef
import cvglmnetPlot
import cvglmnetPredict
importlib.reload(glmnet)
#importlib.reload(glmnetPlot)
importlib.reload(glmnetPrint)
importlib.reload(glmnetCoef)
importlib.reload(glmnetPredict)
importlib.reload(cvglmnet)
importlib.reload(cvglmnetCoef)
importlib.reload(cvglmnetPlot)
importlib.reload(cvglmnetPredict)
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleY.dat', dtype = scipy.float64, delimiter = ',')
# call glmnet
mfit = glmnet.glmnet(x = x.copy(), y = y.copy(), family = 'mgaussian')
plt.figure()
glmnetPlot(mfit, xvar = 'lambda', label = True, ptype = '2norm')
f = glmnetPredict.glmnetPredict(mfit, x[0:5,:], s = scipy.float64([0.1, 0.01]))
print(f[:,:,0])
print(f[:,:,1])
plt.figure()
t = time.time()
cvmfit = cvglmnet.cvglmnet(x = x.copy(), y = y.copy(), family = "mgaussian", parallel = True)
e = time.time() - t
print('time elapsed = ', e)
cvglmnetPlot.cvglmnetPlot(cvmfit)
| gpl-2.0 |
sonnyhu/scikit-learn | sklearn/neighbors/regression.py | 31 | 10999 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_inst/Geneva_Rot_inst_age6/IR.py | 33 | 7344 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
numplots = 12
def add_sub_plot(sub_num):
plt.subplot(3,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 9:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 12:
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [75, #AR 3 7135
76, #TOTL 7325
78, #AR 3 7751
79, #6LEV 8446
80, #CA2X 8498
81, #CA2Y 8542
82, #CA2Z 8662
83, #CA 2 8579A
84, #S 3 9069
85, #H 1 9229
86, #S 3 9532
87] #H 1 9546
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("IR Lines", fontsize=14)
# ---------------------------------------------------
for i in range(12):
add_sub_plot(i)
ax1 = plt.subplot(3,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Near_IR.pdf')
plt.clf()
| gpl-2.0 |
fabioticconi/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 83 | 5888 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
sgranitz/nw | predict420/grex2_Customer_Sales_Analysis.py | 2 | 7041 | # Stephan Granitz [ GrEx2 ]
# Import libraries
import pandas as pd
import numpy as np
import shelve
import sqlite3
# 1 Import each of the csv files you downloaded from the SSCC into a pandas DF
# Grab files
folder = "C:/Users/sgran/Desktop/GrEx2/"
io1 = folder + "seg6770cust.csv"
io2 = folder + "seg6770item.csv"
io3 = folder + "seg6770mail.csv"
# 1 a)
# Fill blanks with NaN for easier handling
customer = pd.read_csv(io1, low_memory=False).fillna(np.nan)
item = pd.read_csv(io2).fillna(np.nan)
mail = pd.read_csv(io3).fillna(np.nan)
# 1 b) print columns in item DF and first 4 records
list(item.columns.values)
item.head(4)
# 1 c) describe data types of cols in DFs
item.info(verbose=False)
mail.info(verbose=False)
customer.info(verbose=False)
# 2 Write each of you pandas DataFrames to a local SQLite DB named xyz.db.
# Include only data for active buyers in these tables
active_customer = customer[customer.buyer_status == 'ACTIVE']
# Filter 'item' and 'mail' tables to only include active buyers
active_item = item[item['acctno'].isin(active_customer['acctno'])]
active_mail = mail[mail['acctno'].isin(active_customer['acctno'])]
# Connect to xyz.db
db = sqlite3.connect('xyz.db')
# Put DFs into the DB
active_customer.to_sql(
'customer',
db,
if_exists='replace',
index=False
)
active_item.to_sql(
'item',
db,
if_exists='replace',
index=False
)
active_mail.to_sql(
'mail',
db,
if_exists='replace',
index=False
)
# Commit the DB write
db.commit()
# Verify that you have written the tables to your SQLite DB correctly
cursor = db.cursor()
query = 'select * from customer limit 1'
res = cursor.execute(query)
res.fetchall()[0][0:10]
cursor.executescript('drop table if exists custSum;')
db.commit()
# 3 Using the same data from 2 above, create a new table called custSum
cursor.execute('''
CREATE TABLE custSum(
acctno TEXT PRIMARY KEY, zip INTEGER, zip4 INTEGER, heavy_buyer TEXT,
has_amex TEXT, has_disc TEXT, has_visa TEXT, has_mc TEXT,
est_income INTEGER, adult1_g TEXT, adult2_g TEXT
)
''')
db.commit()
# Filter to the columns needed
cols = [
'acctno', 'zip', 'zip4', 'ytd_sales_2009', 'amex_prem', 'amex_reg',
'disc_prem', 'disc_reg', 'visa_prem', 'visa_reg', 'mc_prem', 'mc_reg',
'inc_scs_amt_v4', 'adult1_g', 'adult2_g'
]
custSum = active_customer[cols]
# Validate
custSum.head(3).transpose().head(6)
# 3 a) indicator of whether the customer is a 'heavy buyer,' where the definition
# of a 'heavy buyer' is a customer whose 2009 YTD purchasing is greater than
# that of 90% of all active buyers (i.e., above the 90th percentile)
heavy = custSum.ytd_sales_2009.dropna().quantile([0.9])[0.9]
custSum['heavy_buyer'] = 'N'
custSum.loc[custSum.ytd_sales_2009 > heavy, 'heavy_buyer'] = 'Y'
# 3 b) Add whether the customer has the following credit cards
# (AMEX, DISC, VISA, MC)
custSum['has_amex'] = 'N'
custSum.loc[custSum.amex_prem == 'Y', 'has_amex'] = 'Y'
custSum.loc[custSum.amex_reg == 'Y', 'has_amex'] = 'Y'
custSum['has_disc'] = 'N'
custSum.loc[custSum.disc_prem == 'Y', 'has_disc'] = 'Y'
custSum.loc[custSum.disc_reg == 'Y', 'has_disc'] = 'Y'
custSum['has_visa'] = 'N'
custSum.loc[custSum.visa_prem == 'Y', 'has_visa'] = 'Y'
custSum.loc[custSum.visa_reg == 'Y', 'has_visa'] = 'Y'
custSum['has_mc'] = 'N'
custSum.loc[custSum.mc_prem == 'Y', 'has_mc'] = 'Y'
custSum.loc[custSum.mc_reg == 'Y', 'has_mc'] = 'Y'
# Drop columns no longer needed
custSum.drop(
['ytd_sales_2009', 'amex_prem', 'amex_reg', 'disc_prem', 'disc_reg',
'visa_prem', 'visa_reg', 'mc_prem', 'mc_reg'], inplace=True, axis=1
)
# 3 c,d,e) Est income, zip, acctno
custSum.rename(columns={'inc_scs_amt_v4': 'est_income'}, inplace=True)
custSum.est_income = custSum.est_income.astype(float)
custSum = custSum[[
'acctno', 'zip', 'zip4', 'heavy_buyer', 'has_amex', 'has_disc',
'has_visa', 'has_mc', 'est_income', 'adult1_g', 'adult2_g'
]]
# Build the insert statement for populating custSum
query = '''
insert or replace into custSum
(acctno, zip, zip4, heavy_buyer, has_amex, has_disc,
has_visa, has_mc, est_income, adult1_g, adult2_g)
values (?,?,?,?,?,?,?,?,?,?,?)
'''
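# Note: the insert statement above is never executed, so custSum stays empty
# at this point. A hedged sketch of one way to populate it (assumes sqlite3
# accepts the converted values):
# cursor.executemany(query, custSum.astype(object).values.tolist())
# db.commit()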
# 3 f) count of the number of records in each table
query = 'select count(*) from '
res1 = cursor.execute(query + 'custSum')
print('Rows in custSum', res1.fetchall())
res2 = cursor.execute(query + 'customer')
print('Rows in customer', res2.fetchall())
res3 = cursor.execute(query + 'item')
print('Rows in item', res3.fetchall())
res4 = cursor.execute(query + 'mail')
print('Rows in mail', res4.fetchall())
# 3 g) Verify table written to SQLite DB correctly
query = 'select * from custSum limit 5'
res = cursor.execute(query)
res.fetchall()
# Close the db connection
db.close()
# 4 a) Target maketing with active buyers or lapsed buyers
marketing = customer[
(customer.buyer_status == 'ACTIVE') |
(customer.buyer_status == 'LAPSED')
]
# 4 b) Find which categories each customer made purchases in
purchase = item.groupby(['acctno','deptdescr'], as_index=False)
purchase = purchase.aggregate(np.sum)
# 4 b) Indicator variable (1/0) for each product category customer made
# at least one purchase
purchase_cats = purchase.pivot(
index='acctno', columns='deptdescr', values='totamt'
)
# NaN means they didn't make any purchases
purchase_cats = pd.DataFrame(purchase_cats.to_records()).fillna(0)
def findSales (cat_list):
for cat in cat_list:
purchase_cats[cat] = purchase_cats[cat].apply(
lambda x: 1 if (x > 0) else 0)
findSales(list(purchase_cats.columns.values)[1::])
# 4 c) Include buyer status & total dollar amount of purchases
cols = ['acctno', 'buyer_status', 'ytd_sales_2009']
sales_info = marketing[cols].merge(purchase_cats)
sales_info.head(3).transpose()
# 4 d) Write your DataFrame to a csv file & store in a shelve database
path = folder + 'sales.csv'
sales_info.to_csv(path, header=True)
sales_shelf = shelve.open('sales_shelf.dbm')
sales_shelf['sales'] = sales_info
sales_shelf.sync()
sales_shelf.close()
# 4 e) Verify the shelve worked
sales_shelf = shelve.open('sales_shelf.dbm')
sales_shelf['sales'].head(3).transpose()
sales_shelf.close()
# 5 Report 6 most frequently purchased product cats by the gender of adult 1
# Add column to count number of adults in each category
purchase['count_adults'] = 1
cols = ['acctno', 'adult1_g']
purchase_gender = purchase.merge(marketing[cols]).groupby(
['adult1_g','deptdescr'], as_index=False)
purchase_gender = purchase_gender.aggregate(np.sum)
purchase_gender.drop('price', axis=1, inplace=True)
# List gender types
purchase_gender.adult1_g.unique()
# Print top 6 most purchased by gender
purchase_gender[purchase_gender['adult1_g'] == 'B'].sort(
['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'F'].sort(
['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'M'].sort(
['qty'], ascending=False).head(6)
purchase_gender[purchase_gender['adult1_g'] == 'U'].sort(
['qty'], ascending=False).head(6)
| mit |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/io/tests/test_cparser.py | 9 | 12962 | """
C/Cython ascii file parser tests
"""
from pandas.compat import StringIO, BytesIO, map
from datetime import datetime
from pandas import compat
import csv
import os
import sys
import re
import nose
from numpy import nan
import numpy as np
from pandas import DataFrame, Series, Index, isnull, MultiIndex
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextParser, TextFileReader)
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network)
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
import pandas.util.testing as tm
from pandas.parser import TextReader
import pandas.parser as parser
class TestCParser(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def test_file_handle(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f)
result = reader.read()
finally:
f.close()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
result = reader.read()
def test_file_handle_mmap(self):
try:
f = open(self.csv1, 'rb')
reader = TextReader(f, memory_map=True, header=None)
result = reader.read()
finally:
f.close()
def test_StringIO(self):
text = open(self.csv1, 'rb').read()
src = BytesIO(text)
reader = TextReader(src, header=None)
result = reader.read()
def test_string_factorize(self):
# should this be optional?
data = 'a\nb\na\nb\na'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(len(set(map(id, result[0]))), 2)
def test_skipinitialspace(self):
data = ('a, b\n'
'a, b\n'
'a, b\n'
'a, b')
reader = TextReader(StringIO(data), skipinitialspace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b', 'b'])
def test_parse_booleans(self):
data = 'True\nFalse\nTrue\nTrue'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
self.assertEqual(result[0].dtype, np.bool_)
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True,
header=None)
result = reader.read()
self.assert_numpy_array_equal(result[0], ['a', 'a', 'a'])
self.assert_numpy_array_equal(result[1], ['b', 'b', 'b'])
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = ['a', 'hello\nthere', 'this']
self.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = '12345,67\n345,678'
reader = TextReader(StringIO(data), delimiter=':',
decimal=',', header=None)
result = reader.read()
expected = [12345.67, 345.678]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = '123,456\n12,500'
reader = TextReader(StringIO(data), delimiter=':',
thousands=',', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = '123.456\n12.500'
reader = TextFileReader(StringIO(data), delimiter=':',
thousands='.', header=None)
result = reader.read()
expected = [123456, 12500]
tm.assert_almost_equal(result[0], expected)
def test_skip_bad_lines(self):
# too many lines, see #2430 for why
data = ('a:b:c\n'
'd:e:f\n'
'g:h:i\n'
'j:k:l:m\n'
'l:m:n\n'
'o:p:q:r')
reader = TextReader(StringIO(data), delimiter=':',
header=None)
self.assertRaises(parser.CParserError, reader.read)
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=False)
result = reader.read()
expected = {0: ['a', 'd', 'g', 'l'],
1: ['b', 'e', 'h', 'm'],
2: ['c', 'f', 'i', 'n']}
assert_array_dicts_equal(result, expected)
stderr = sys.stderr
sys.stderr = StringIO()
try:
reader = TextReader(StringIO(data), delimiter=':',
header=None,
error_bad_lines=False,
warn_bad_lines=True)
reader.read()
val = sys.stderr.getvalue()
self.assertTrue('Skipping line 4' in val)
self.assertTrue('Skipping line 6' in val)
finally:
sys.stderr = stderr
def test_header_not_enough_lines(self):
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {0 : [1, 4], 1 : [2, 5], 2 : [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_header_not_enough_lines_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest("segfaults on win-64, only when all tests are run")
data = ('skip this\n'
'skip this\n'
'a,b,c\n'
'1,2,3\n'
'4,5,6')
reader = TextReader(StringIO(data), delimiter=',', header=2,
as_recarray=True)
header = reader.header
expected = [['a', 'b', 'c']]
self.assertEqual(header, expected)
recs = reader.read()
expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]}
assert_array_dicts_equal(expected, recs)
# not enough rows
self.assertRaises(parser.CParserError, TextReader, StringIO(data),
delimiter=',', header=5, as_recarray=True)
def test_escapechar(self):
data = ('\\"hello world\"\n'
'\\"hello world\"\n'
'\\"hello world\"')
reader = TextReader(StringIO(data), delimiter=',', header=None,
escapechar='\\')
result = reader.read()
expected = {0: ['"hello world"'] * 3}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S5,i4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S5')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaaa'], dtype='S5')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'i4')
reader = _make_reader(dtype='S4')
result = reader.read()
self.assertEqual(result[0].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result[0] == ex_values).all())
self.assertEqual(result[1].dtype, 'S4')
def test_numpy_string_dtype_as_recarray(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
if compat.is_platform_windows():
raise nose.SkipTest("segfaults on win-64, only when all tests are run")
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', header=None,
**kwds)
reader = _make_reader(dtype='S4', as_recarray=True)
result = reader.read()
self.assertEqual(result['0'].dtype, 'S4')
ex_values = np.array(['a', 'aa', 'aaa', 'aaaa', 'aaaa'], dtype='S4')
self.assertTrue((result['0'] == ex_values).all())
self.assertEqual(result['1'].dtype, 'S4')
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(dtype={'one': 'u1', 1: 'S1'})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'S1')
reader = _make_reader(dtype={'one': np.uint8, 1: object})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
reader = _make_reader(dtype={'one': np.dtype('u1'),
1: np.dtype('O')})
result = reader.read()
self.assertEqual(result[0].dtype, 'u1')
self.assertEqual(result[1].dtype, 'O')
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=',', **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
self.assertEqual(len(result), 2)
self.assertTrue((result[1] == exp[1]).all())
self.assertTrue((result[2] == exp[2]).all())
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace('\r', '\r\n')
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = 'a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12'
_test(data, delimiter=',')
data = 'a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12'
_test(data, delim_whitespace=True)
data = 'a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12'
_test(data, delimiter=',')
sample = ('A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r'
'AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r'
',BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0')
_test(sample, delimiter=',')
data = 'A B C\r 2 3\r4 5 6'
_test(data, delim_whitespace=True)
data = 'A B C\r2 3\r4 5 6'
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = 'a,b,c\n1,2,3\n4,,'
result = TextReader(StringIO(data), delimiter=',').read()
expected = {0: np.array([1, 4]),
1: np.array(['2', ''], dtype=object),
2: np.array(['3', ''], dtype=object)}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([['b'], [nan]], columns=['a'], index=['a', 'c'])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]],
columns=list('abcd'),
index=[1, 1])
c = DataFrame([[1, 2, 3, 4], [6, nan, nan, nan],
[8, 9, 10, 11], [13, 14, nan, nan]],
columns=list('abcd'),
index=[0, 5, 7, 12])
for _ in range(100):
df = read_csv(StringIO('a,b\nc\n'), skiprows=0,
names=['a'], engine='c')
assert_frame_equal(df, a)
df = read_csv(StringIO('1,1,1,1,0\n'*2 + '\n'*2),
names=list("abcd"), engine='c')
assert_frame_equal(df, b)
df = read_csv(StringIO('0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14'),
names=list('abcd'), engine='c')
assert_frame_equal(df, c)
def assert_array_dicts_equal(left, right):
for k, v in compat.iteritems(left):
assert(np.array_equal(v, right[k]))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
darionyaphet/spark | python/pyspark/testing/sqlutils.py | 9 | 7813 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import shutil
import tempfile
from contextlib import contextmanager
from pyspark.sql import SparkSession
from pyspark.sql.types import ArrayType, DoubleType, UserDefinedType, Row
from pyspark.testing.utils import ReusedPySparkTestCase
from pyspark.util import _exception_message
pandas_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pandas_version
require_minimum_pandas_version()
except ImportError as e:
# If Pandas version requirement is not satisfied, skip related tests.
pandas_requirement_message = _exception_message(e)
pyarrow_requirement_message = None
try:
from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
require_minimum_pyarrow_version()
except ImportError as e:
# If Arrow version requirement is not satisfied, skip related tests.
pyarrow_requirement_message = _exception_message(e)
test_not_compiled_message = None
try:
from pyspark.sql.utils import require_test_compiled
require_test_compiled()
except Exception as e:
test_not_compiled_message = _exception_message(e)
have_pandas = pandas_requirement_message is None
have_pyarrow = pyarrow_requirement_message is None
test_compiled = test_not_compiled_message is None
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class SQLTestUtils(object):
"""
This util assumes the instance using it has a 'spark' attribute holding a SparkSession.
It is usually used with the 'ReusedSQLTestCase' class, but can be used on its own if you
are sure the implementing class has a 'spark' attribute.
"""
@contextmanager
def sql_conf(self, pairs):
"""
A convenient context manager to test some configuration specific logic. This sets
`value` to the configuration `key` and then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
keys = pairs.keys()
new_values = pairs.values()
old_values = [self.spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
self.spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
self.spark.conf.unset(key)
else:
self.spark.conf.set(key, old_value)
@contextmanager
def database(self, *databases):
"""
A convenient context manager to test with some specific databases. This drops the given
databases if it exists and sets current database to "default" when it exits.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for db in databases:
self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db)
self.spark.catalog.setCurrentDatabase("default")
@contextmanager
def table(self, *tables):
"""
A convenient context manager to test with some specific tables. This drops the given tables
if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for t in tables:
self.spark.sql("DROP TABLE IF EXISTS %s" % t)
@contextmanager
def tempView(self, *views):
"""
A convenient context manager to test with some specific views. This drops the given views
if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for v in views:
self.spark.catalog.dropTempView(v)
@contextmanager
def function(self, *functions):
"""
A convenient context manager to test with some specific functions. This drops the given
functions if it exists.
"""
assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session."
try:
yield
finally:
for f in functions:
self.spark.sql("DROP FUNCTION IF EXISTS %s" % f)
class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super(ReusedSQLTestCase, cls).setUpClass()
cls.spark = SparkSession(cls.sc)
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
super(ReusedSQLTestCase, cls).tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
| apache-2.0 |
gfyoung/pandas | pandas/tests/io/excel/test_odf.py | 4 | 1388 | import functools
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
pytest.importorskip("odf")
@pytest.fixture(autouse=True)
def cd_and_set_engine(monkeypatch, datapath):
func = functools.partial(pd.read_excel, engine="odf")
monkeypatch.setattr(pd, "read_excel", func)
monkeypatch.chdir(datapath("io", "data", "excel"))
def test_read_invalid_types_raises():
# the invalid_value_type.ods required manually editing
# of the included content.xml file
with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"):
pd.read_excel("invalid_value_type.ods")
def test_read_writer_table():
# Also test reading tables from an text OpenDocument file
# (.odt)
index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
expected = pd.DataFrame(
[[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]],
index=index,
columns=["Column 1", "Unnamed: 2", "Column 3"],
)
result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0)
tm.assert_frame_equal(result, expected)
def test_nonexistent_sheetname_raises(read_ext):
# GH-27676
# Specifying a non-existent sheet_name parameter should throw an error
# with the sheet name.
with pytest.raises(ValueError, match="sheet xyz not found"):
pd.read_excel("blank.ods", sheet_name="xyz")
| bsd-3-clause |
sgranitz/northwestern | predict410/ols_ames-housing_hw02.py | 2 | 11931 | #Import packages
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn import feature_selection
from tabulate import tabulate
from statsmodels.iolib.summary2 import summary_col
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.model_selection import KFold
#Set some display options
pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 40)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 120)
# Read in the ames train test datasets
path = 'C:/Users/sgran/Desktop/northwestern/predict_410/assignment_1/'
train = pd.read_csv(path + 'ames_train.csv')
test = pd.read_csv(path + 'ames_test.csv')
# convert all variable names to lower case
train.columns = [s.lower() for s in train.columns]
test.columns = [s.lower() for s in test.columns]
# Limit train data to single family homes
train = train[(train.bldgtype == '1Fam')]
cols = [
'bsmtfinsf1',
'bsmtfinsf2',
'garagearea',
'wooddecksf',
'openporchsf',
'enclosedporch',
'threessnporch',
'screenporch',
'poolarea',
'bsmtfullbath',
'bsmthalfbath',
'fullbath',
'halfbath'
]
train[cols] = train[cols].fillna(0)
test[cols] = test[cols].fillna(0)
lot_ratio = train.lotfrontage.median() / train.lotarea.median()
train.lotfrontage = train.lotfrontage.fillna(train.lotarea * lot_ratio)
test.lotfrontage = test.lotfrontage.fillna(test.lotarea * lot_ratio)
train['qualityindex'] = (train.overallqual * train.overallcond)
train['totalsqftcalc'] = (train.bsmtfinsf1 + train.bsmtfinsf2 + train.grlivarea)
train['outerarea_fin'] = (
train.garagearea + train.wooddecksf + train.openporchsf +
train.enclosedporch + train.threessnporch + train.screenporch +
train.poolarea
)
train['bathrooms'] = (
train.fullbath + 0.5 * train.halfbath +
train.bsmtfullbath + 0.5 * train.bsmthalfbath
)
train['ppsqft'] = (train.saleprice / train.totalsqftcalc)
train[['qualityindex','totalsqftcalc', 'outerarea_fin', 'bathrooms']].hist()
test['qualityindex'] = (test.overallqual * test.overallcond)
test['totalsqftcalc'] = (test.bsmtfinsf1 + test.bsmtfinsf2 + test.grlivarea)
test['outerarea_fin'] = (
test.garagearea + test.wooddecksf + test.openporchsf +
test.enclosedporch + test.threessnporch + test.screenporch +
test.poolarea
)
test['bathrooms'] = (
test.fullbath + 0.5 * test.halfbath +
test.bsmtfullbath + 0.5 * test.bsmthalfbath
)
print("Train neighborhoods: ", len(train.neighborhood.unique()))
print("Test neighborhoods: ", len(test.neighborhood.unique()))
cols = [
'neighborhood',
'saleprice',
'qualityindex',
'totalsqftcalc',
'yearbuilt',
'lotarea',
'lotfrontage',
'outerarea_fin',
'bathrooms'
]
plt.figure()
train[cols].groupby(['neighborhood']).plot(
kind='box',
subplots=True,
layout=(5,9),
sharex=False,
sharey=False,
figsize=(18,14)
)
plt.show()
plt.figure()
train.ppsqft.hist()
plt.show()
train[['neighborhood', 'ppsqft']].groupby(['neighborhood']).describe()
plt.figure()
train[['neighborhood', 'ppsqft']].groupby(['neighborhood']).hist()
plt.show()
plt.figure()
train.boxplot(column='saleprice', by='neighborhood', vert=False)
plt.show()
plt.figure()
train[['neighborhood', 'ppsqft']].groupby(['neighborhood']).agg(np.median).hist()
plt.show()
nbhd_med = pd.DataFrame(
train[[
'neighborhood',
'ppsqft'
]].groupby([
'neighborhood'
]).agg(np.median).reset_index()
)
nbhd_med['type'] = pd.cut(nbhd_med['ppsqft'], bins=5, labels=False)
labels = np.arange(5)
nbhd_med['type'] = labels[nbhd_med['type']]
nbhd_map = pd.Series(
nbhd_med.type.values,
index=nbhd_med.neighborhood
).to_dict()
nbhd_med.plot.scatter(x="type", y="ppsqft")
sns.stripplot("type", "ppsqft", data=nbhd_med, jitter=0.2)
sns.despine()
train['nbhd_type'] = train['neighborhood'].map(nbhd_map)
test['nbhd_type'] = test['neighborhood'].map(nbhd_map)
print(train.describe())
#take a look at some correlations with the saleprice
X = train[[
'saleprice',
'qualityindex',
'totalsqftcalc',
'yearbuilt',
'lotarea',
'lotfrontage',
'outerarea_fin',
'bathrooms',
'nbhd_type'
]].copy()
X1 = train[[
'qualityindex',
'totalsqftcalc',
'yearbuilt',
'lotarea',
'lotfrontage',
'outerarea_fin',
'bathrooms',
'nbhd_type'
]].copy()
corr = X[X.columns].corr()
print(corr)
Y = train[['saleprice']].copy()
print(Y.head)
select_top_3 = SelectKBest(score_func=chi2, k = 3)
fit = select_top_3.fit(X1,Y)
features = fit.transform(X1)
features[0:7]
#Set variable list
y = train['saleprice']
plt.plot(y)
#Code for linear regression with categorical variables c()
model1 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+C(lotconfig)+C(housestyle)+\
yearbuilt+C(roofstyle)+C(heating)',
data=train
).fit()
model1.summary()
model2 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+yearbuilt',
data=train
).fit()
model2.summary()
model3 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+yearbuilt+outerarea_fin+\
bathrooms+C(housestyle)',
data=train
).fit()
model3.summary()
model4 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+yearbuilt+outerarea_fin+\
bathrooms+neighborhood',
data=train
).fit()
model4.summary()
model5 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+yearbuilt+outerarea_fin+\
bathrooms+nbhd_type+C(heating)',
data=train
).fit()
model5.summary()
pred = model4.predict(train)
train['pred'] = pred
train['res'] = train.saleprice - train.pred
cols = ['neighborhood', 'saleprice', 'pred', 'res']
train[cols].sort_values('res', ascending=False)
train[cols].groupby(['neighborhood']).agg(np.median).sort_values('res', ascending=False)
train.res.mean()
plt.figure()
sns.boxplot(x=train.res, y=train.neighborhood, orient='h')
plt.show()
train['pred_ppsqft'] = (train.pred / train.totalsqftcalc)
plt.figure()
sns.regplot(x=train.ppsqft, y=train.pred_ppsqft)
plt.show()
plt.figure()
sns.lmplot(
x='ppsqft', y='pred_ppsqft', data=train,
fit_reg=False, hue='neighborhood',
size=6, aspect=2
)
plt.show()
import math
def log(x):
if x == 0: return 0
return math.log(x)
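# Note: log() above maps 0 to 0 instead of -inf; this is a pragmatic guard for
# zero-valued inputs, at the cost of slightly distorting the transform there.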
def exp(x):
return math.exp(x)
y = train['saleprice'].apply(log)
model6 = smf.ols(
formula='y ~ qualityindex+totalsqftcalc+yearbuilt+outerarea_fin+\
bathrooms+nbhd_type+C(heating)',
data=train
).fit()
model6.summary()
train['log_pred'] = model6.predict(train)
train['log_res'] = y - train.log_pred
cols = ['neighborhood', 'saleprice', 'log_pred', 'log_res']
train[cols].sort_values('log_res', ascending=False)
train[cols].groupby(['neighborhood']).agg(np.median).sort_values('log_res', ascending=False)
print(train.log_res.mean())
plt.figure()
sns.boxplot(x=train.log_res, y=train.neighborhood, orient='h')
plt.show()
train['log_pred_ppsqft'] = (train.log_pred / train.totalsqftcalc)
train['log_ppsqft'] = (y / train.totalsqftcalc)
plt.figure()
sns.regplot(x=train.log_ppsqft, y=train.log_pred_ppsqft)
plt.show()
plt.figure()
sns.lmplot(
x='log_ppsqft', y='log_pred_ppsqft', data=train,
fit_reg=False, hue='neighborhood',
size=6, aspect=2
)
plt.show()
train['log_sqft'] = train['totalsqftcalc'].apply(log)
train['yrs_old'] = 2018 - train['yearbuilt']
features = [
'qualityindex',
'log_sqft',
'yrs_old',
'outerarea_fin',
'bathrooms',
'nbhd_type',
'heating'
]
features = "+".join(train[features].columns)
model7 = smf.ols(formula='y ~' + features, data=train).fit()
model7.summary()
y, X = dmatrices('y ~' + features, train, return_type='dataframe')
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features"] = X.columns
print("vif model 7:", vif.T)
features = [
'qualityindex',
'log_sqft',
'yrs_old',
'outerarea_fin',
'bathrooms',
'garagecars',
'nbhd_type',
'exterqual'
]
features = "+".join(train[features].columns)
y = train['saleprice'].apply(log)
model8 = smf.ols(formula='y ~' + features, data=train).fit()
model8.summary()
y, X = dmatrices('y ~' + features, train, return_type='dataframe')
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif["features"] = X.columns
print("vif model 8:", vif.T)
test['log_sqft'] = test['totalsqftcalc'].apply(log)
test['yrs_old'] = 2018 - test['yearbuilt']
test['garagecars'] = test['garagecars'].fillna(1)
model8.predict(test).apply(exp)
# Try feature selection
y = train['saleprice']
X = train[[
'qualityindex',
'log_sqft',
'yrs_old',
'outerarea_fin',
'bathrooms',
'garagecars',
'nbhd_type'
]].copy()
X.head()
model = feature_selection.SelectKBest(score_func=feature_selection.f_regression, k=4)
results = model.fit(X, y)
print(results.scores_)
# Compare models
out = [model4,
model5,
model7,
model8]
out_df = pd.DataFrame()
out_df['labels'] = ['rsquared', 'rsquared_adj', 'fstatistic', 'aic']
i = 0
for model in out:
print(i)
plt.figure()
if (i == 0 or i == 1):
train['pred'] = model.predict(train)
train.plot.scatter(x='saleprice', y='pred', title='model' + str(i+1))
else:
train['pred'] = model.predict(train).apply(exp)
train.plot.scatter(x='saleprice', y='pred', title='model' + str(i+1))
plt.show()
out_df['model' + str(i+1)] = [
model.rsquared.round(3),
model.rsquared_adj.round(3),
model.fvalue.round(3),
model.aic.round(3)
]
i += 1
print(tabulate(out_df, headers=out_df.columns, tablefmt='psql'))
print(summary_col(out, stars=True))
plt.figure()
g = sns.PairGrid(train,
x_vars=["bathrooms",
"outerarea_fin",
"qualityindex",
"totalsqftcalc",
"nbhd_type"],
y_vars=["saleprice"],
aspect=.75, size=3.5)
g.map(sns.violinplot, palette="pastel");
# Model 8 kfolds
num_folds = 10
# set up numpy array for storing results
results = np.zeros((num_folds, 1))
kf = KFold(
    n_splits=num_folds,
    # random_state only affects KFold when shuffle=True, so it is omitted here
    shuffle=False
)
train['log_sp'] = train['saleprice'].apply(log)
train8 = np.array([
np.array(train['qualityindex']),
np.array(train['log_sqft']),
np.array(train['yrs_old']),
np.array(train['outerarea_fin']),
np.array(train['bathrooms']),
np.array(train['garagecars']),
np.array(train['nbhd_type']),
#np.array(train['exterqual']),
np.array(train['log_sp'])
]).T
def calc_rmse(pred, expect):
return np.sqrt(((pred - expect) ** 2).mean())
i = 0
for train_index, test_index in kf.split(train8):
print('\nFold index:', i, '-----')
X_train = train8[train_index, 0:train8.shape[1]-1]
y_train = train8[train_index, train8.shape[1]-1]
X_test = train8[test_index, 0:train8.shape[1]-1]
y_test = train8[test_index, train8.shape[1]-1]
print('\nShape of input data for this fold:\n')
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('\nX_test:', X_test.shape)
print('y_test:', y_test.shape)
#model8.fit(X_train, y_train)
    # Note: 'probability' is not an sm.OLS argument (it belongs to sklearn's SVC), so it is dropped.
    model = sm.OLS(y_train, X_train).fit()
# evaluate on the test set for this fold
rmse = calc_rmse(model.predict(X_test), y_test)
results[i, 0] = rmse
i += 1
print(pd.DataFrame(results))
print("Avg. RMSE:", results.mean())
#Convert the array predictions to a data frame
test_predictions = model8.predict(test).apply(exp)
print(test_predictions)
d = {'p_saleprice': test_predictions}
df1 = test[['index']]
df2 = pd.DataFrame(data=d)
output = pd.concat([df1, df2], axis=1, join_axes=[df1.index])
output.to_csv(
'C:/Users/sgran/Desktop/northwestern/predict_410/assignment_2/hw02_predictions.csv'
)
| mit |
themrmax/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 46 | 2640 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature, the model learns both the
x and y coordinates as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
| bsd-3-clause |
aas-integration/integration-test2 | plot_scatter.py | 2 | 8588 | import numpy
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
import os, sys, re
from collections import defaultdict
import argparse
import common, dot
from nltk.stem.snowball import SnowballStemmer
"""Read the program similarity result files and plot text similarity vs. program similarity"""
def compute_method_text_similarity(m1_full_str, m2_full_str, name_re, camel_re, stemmer):
# (0) get just the name of the method
# (1) remove all non-letter characters in the name
# (2) split using camel case
# (3) stem all words
# (4) count the number of matched stemmed words (including duplicates)
# (5) score = len(all matched words)/len(all stemmed words)
# (0):
m1_method_name = get_method_name_only(m1_full_str, name_re)
m2_method_name = get_method_name_only(m2_full_str, name_re)
# (1):
    m1_method_clean = re.sub(r"[\d$_]", "", m1_method_name)
    m2_method_clean = re.sub(r"[\d$_]", "", m2_method_name)
#m1_remove_len = len(m1_method_name) - len(m1_method_clean)
#m2_remove_len = len(m2_method_name) - len(m2_method_clean)
# (2):
m1_word_lst = get_method_word_list(m1_method_clean, camel_re)
m2_word_lst = get_method_word_list(m2_method_clean, camel_re)
# (3):
#m1_word_lst = [w.lower() for w in m1_word_lst]
#m2_word_lst = [w.lower() for w in m2_word_lst]
# (3):
m1_stemmed_word_lst = [stemmer.stem(w) for w in m1_word_lst]
m2_stemmed_word_lst = [stemmer.stem(w) for w in m2_word_lst]
#m1_stem_len = sum([len(w) for w in m1_word_lst]) - sum([len(w) for w in m1_stemmed_word_lst])
#m2_stem_len = sum([len(w) for w in m2_word_lst]) - sum([len(w) for w in m2_stemmed_word_lst])
# (4):
m1_word_dict = defaultdict(int)
m2_word_dict = defaultdict(int)
for w1 in m1_stemmed_word_lst:
m1_word_dict[w1]+=1
for w2 in m2_stemmed_word_lst:
m2_word_dict[w2]+=1
common_word_set = set(m1_stemmed_word_lst) & set(m2_stemmed_word_lst)
common_word_len = 0
for wd in common_word_set:
common_word_len += len(wd)*2*min(m1_word_dict[wd], m2_word_dict[wd])
# (5):
score = float(common_word_len)/(sum([len(w) for w in m1_stemmed_word_lst]) + sum([len(w) for w in m2_stemmed_word_lst]))
return score
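# Worked example (approximate; assumes the Snowball stemmer maps "Enabled" to "enabl"):
# for two methods whose extracted names are "setMotorEnabled" and "isMotorEnabled",
# the camel-case splits are ["set", "Motor", "Enabled"] and ["is", "Motor", "Enabled"],
# the shared stems are {"motor", "enabl"}, and the score is
# (2*5 + 2*5) / (13 + 12) = 20 / 25 = 0.8.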
def get_method_word_list(method_str, camel_re):
word_lst = []
for match in camel_re.finditer(method_str):
word_lst.append(match.group(0))
return word_lst
def compile_camel_case_re_pattern():
return re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")
def compile_method_re_pattern():
return re.compile(r"<[\w\d_$\.]+\s*:\s+[\w\d_$.\[\]]+\s+<*([\w\d_$\']+)>*\([\[\].\w\d_$\,\s]*\)>")
def get_method_name_only(method_full_str, re_prog):
#Example1: <org.dyn4j.dynamics.joint.RevoluteJoint: void setMotorEnabled(boolean)>
#Example2: <com.flowpowered.react.math.Quaternion: float lengthSquare()>
#Example3: <com.flowpowered.react.math.Quaternion: void <init>(float,float,float,float)>
#Example4: <org.dyn4j.dynamics.joint.MotorJoint: java.lang.String toString()>
#Example5: <org.dyn4j.dynamics.Body: java.util.List removeFixtures(org.dyn4j.geometry.Vector2)>
#Example6: <com.jme3.material.plugins.ShaderNodeLoaderDelegate: com.jme3.shader.VariableMapping parseMapping(com.jme3.util.blockparser.Statement,boolean[])>
#Example7: <org.dyn4j.geometry.Polygon: org.dyn4j.geometry.Vector2[] getAxes(org.dyn4j.geometry.Vector2[],org.dyn4j.geometry.Transform)>
#Example8: <org.dyn4j.geometry.Vector3: org.dyn4j.geometry.Vector3 'to'(double,double,double)>
m = re_prog.match(method_full_str)
if m:
return m.group(1)
else:
print("Should always find a method name. The fully qualitified method name was:")
print(method_full_str)
sys.exit(0)
def create_stemmer():
return SnowballStemmer('english')
def stem_word_lst(stemmer, word_lst):
return [stemmer.stem(w) for w in word_lst]
def get_dot_method_map(proj_lst):
dot_method_map = {}
for proj in proj_lst:
output_dir_lst = dot.dot_dirs(proj)
for output_dir in output_dir_lst:
method_file = dot.get_method_path(proj, output_dir)
with open(method_file, "r") as mf:
for line in mf:
line = line.rstrip()
items = line.split("\t")
method_name = items[0]
method_dot = items[1]
method_dot_path = dot.get_dot_path(proj, output_dir, method_dot)
dot_method_map[method_dot_path] = method_name
return dot_method_map
def parse_result_file(result_file, dot_method_map):
"""
file format:
path_to_dotA:
path_to_similar_dot1 , score
...
path_to_similar_dot5 , score
path_to_dotB:
...
"""
method_dict = {} # method_dict[method] = [similar_method, prog_score, text_score]
stemmer = create_stemmer()
name_re = compile_method_re_pattern()
camel_re = compile_camel_case_re_pattern()
count = 0
current_dot = None
with open(result_file, "r") as fi:
for line in fi:
line = line.rstrip('\n')
if len(line)>0 and line[-1]==":":
current_dot = line[:-1]
current_method = dot_method_map[current_dot]
else:
linarr = line.split(" , ")
if linarr[0][-3:]=="dot":
# consider most similar method only
if count == 0:
similar_method = dot_method_map[linarr[0]]
# compute word based similarity
prog_score = float(linarr[1])
text_score = compute_method_text_similarity(current_method, similar_method, name_re, camel_re, stemmer)
method_dict[current_method] = [similar_method, prog_score, text_score]
count += 1
if count == 5:
count = 0
return method_dict
def plot_scatter(x, x_axis_label, y, y_axis_label, fig_file, title=""):
"""
heatmap, xedges, yedges = numpy.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
pyplot.figure()
pyplot.imshow(heatmap, extent=extent)
pyplot.title(title)
pyplot.xlabel(x_axis_label)
pyplot.ylabel(y_axis_label)
"""
pyplot.figure()
pyplot.scatter(x, y, marker='x', alpha=0.5)
pyplot.title(title)
pyplot.xlabel(x_axis_label)
pyplot.ylabel(y_axis_label)
pyplot.xlim(-0.05, 1.05)
pyplot.ylim(-0.05, 1.05)
pp = PdfPages(fig_file+".pdf")
pyplot.savefig(pp, format="pdf")
pp.close()
def main():
parser = argparse.ArgumentParser()
#parser.add_argument("-nc", "--nocluster", required=True, type=str, help="path to the result folder without relabeling")
parser.add_argument("-c", "--cluster", required=True, type=str, help="path to the result folder with relabeling")
parser.add_argument("-f", "--fig", type=str, help="path to the figure folder")
parser.add_argument("-s", "--strategy", required=True, type=str, help="name of the strategy")
args = parser.parse_args()
proj_lst = common.LIMITED_PROJECT_LIST
fig_dir = args.strategy+"_scatter"
if args.fig:
fig_dir = args.fig
common.mkdir(fig_dir)
dot_method_map = get_dot_method_map(proj_lst)
for proj in proj_lst:
print(proj+":")
proj_result_file_name = proj + "_result.txt"
method_dict = parse_result_file(os.path.join(args.cluster, proj_result_file_name), dot_method_map)
xs = []
ys = []
count11 = 0
for m in list(method_dict.keys()):
x_v = method_dict[m][1]
y_v = method_dict[m][2]
xs.append(x_v)
ys.append(y_v)
if abs((1.0 - x_v)) < 0.0005 and abs((1.0 - y_v)) < 0.0005:
#print(m + "\t" + method_dict[m][0])
count11 += 1
print("(1,1): {0}".format(count11))
# save xs and ys
with open(os.path.join(fig_dir, proj+"_data.txt"), "w") as df:
df.write(",".join([str(x) for x in xs]))
df.write("\n")
df.write(",".join([str(y) for y in ys]))
plot_scatter(xs, "semantic similarity", ys, "name similarity", os.path.join(fig_dir, proj), proj+" : "+args.strategy)
# correlation:
print(numpy.corrcoef(xs,ys))
print("\n")
if __name__ == "__main__":
main()
| mit |
Kongsea/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(
export_directory, input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
bhargav/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
simonalford42/Better | Germain Python/EM.py | 1 | 3620 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 30 09:17:52 2017
@author: alfordsimon
"""
import numpy as np
import pandas as pd
import time
class CMM():
    def __init__(self, k, ds):
        """ds is a list containing the number of categories for each feature"""
        self.k = k
        self.ds = ds
        # pi must be a plain array; a trailing comma here would wrap it in a
        # 1-tuple and break `self.pi + 1e-10` in the first E-step.
        self.pi = np.random.dirichlet([1] * k)
        self.alpha = [np.random.dirichlet([1] * d, size=k) for d in ds]
        # fit() stores the final posterior responsibilities in this dict
        self.params = {}
def e_step(self, data):
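        # E-step: for each sample n and cluster j, accumulate (up to normalization)
        #     log p(z=j | x_n) ~ log pi_j + sum_d log alpha[d][j, x_n_d]
        # using one-hot (dummy) encodings so every feature contributes a single
        # matrix product instead of an explicit loop over samples.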
n, D = data.shape
posts = np.zeros((n, self.k))
log_posts = np.zeros((n, self.k))
for d in range(D):
logged_feature_probs = np.log(self.alpha[d] + 1e-10) # k by n_d
feature_vals = data.iloc[:,d]
dummy = pd.get_dummies(feature_vals) # n by nd
log_output = dummy @ logged_feature_probs.T # (n x nd) x (nd X k) = n x k
log_posts += log_output # n x k
log_z = np.log(self.pi + 1e-10) # k by 1
log_posts += log_z.T
#pre_norm ~ log_posts
# posts ~ posterior
posts = np.exp(log_posts)
posts = posts / (1e-10 + np.sum(posts, axis=1)[:,None]) # n by k = p(z | x, pi, alph)
# adds (k by 1) array to each row in posts
log_likelihood = np.sum(log_posts*posts)
return log_likelihood, posts
def m_step(self, data, p_z):
n, D = data.shape
pi_num = np.sum(p_z, axis=0)
new_pi = pi_num / (1e-10 + np.sum(pi_num))
new_alpha = []
for d in range(D):
# p_z is n by k
feature_vals = data.iloc[:,d]
dummy = pd.get_dummies(feature_vals) # n by nd
# each entry shows the weighted number of people that have that category in that cluster
cat_pops = np.dot(p_z.T, dummy) # (k by n) x (n by nd) = (k by nd)
# now we have to average the category for each cluster so that these piles turn into probabilities
cat_pops = cat_pops / (1e-10 + np.sum(cat_pops, axis = 1)[:,None])
new_alpha.append(cat_pops)
return new_pi, new_alpha
def fit(self, data, eps=1e-4, verbose=True, max_iters=100):
""" Fits the model to data
data - an NxD pandas DataFrame
eps - the tolerance for the stopping criterion
verbose - whether to print ll every iter
max_iters - maximum number of iterations before giving up
returns a boolean indicating whether fitting succeeded
if fit was successful, sets the following properties on the Model object:
n_train - the number of data points provided
max_ll - the maximized log-likelihood
"""
last_ll = np.finfo(float).min
start_t = last_t = time.time()
i = 0
converged = False
while i < max_iters and not converged:
i += 1
ll, p_z = self.e_step(data)
new_pi, new_alpha = self.m_step(data, p_z)
self.pi = new_pi
self.alpha = new_alpha
if verbose:
dt = time.time() - last_t
last_t += dt
print('iter %s: ll = %.5f (%.2f s)' % (i, ll, dt))
if abs((ll - last_ll) / ll) < eps:
converged = True
last_ll = ll
setattr(self, 'n_train', len(data))
setattr(self, 'max_ll', ll)
self.params.update({'p_z': p_z})
print('max ll = %.5f (%.2f min, %d iters)' %
(ll, (time.time() - start_t) / 60, i))
return True
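# Minimal usage sketch (illustrative only; the DataFrame and category counts are assumed):
#
#     df = pd.DataFrame({'color': np.random.randint(0, 3, size=200),
#                        'shape': np.random.randint(0, 4, size=200)})
#     model = CMM(k=2, ds=[3, 4])
#     model.fit(df, max_iters=50)
#     print(model.max_ll, model.params['p_z'].shape)  # (n, k) posterior responsibilities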
| mit |
SepehrMN/nest-simulator | pynest/nest/tests/test_get_set.py | 5 | 21303 | # -*- coding: utf-8 -*-
#
# test_get_set.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
NodeCollection get/set tests
"""
import unittest
import nest
import json
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
try:
import pandas
import pandas.util.testing as pt
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
@nest.ll_api.check_stack
class TestNodeCollectionGetSet(unittest.TestCase):
"""NodeCollection get/set tests"""
def setUp(self):
nest.ResetKernel()
def test_get(self):
"""
Test that get function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
C_m = nodes.get('C_m')
node_ids = nodes.get('global_id')
E_L = nodes.get('E_L')
V_m = nodes.get('V_m')
t_ref = nodes.get('t_ref')
g = nodes.get(['local', 'thread', 'vp'])
local = g['local']
thread = g['thread']
vp = g['vp']
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(node_ids, tuple(range(1, 11)))
self.assertEqual(E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertTrue(local)
self.assertEqual(thread, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(vp, (0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
g_reference = {'local': (True, True, True, True, True,
True, True, True, True, True),
'thread': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
'vp': (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)}
self.assertEqual(g, g_reference)
def test_get_sliced(self):
"""
Test that get works on sliced NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
V_m = nodes[2:5].get('V_m')
g = nodes[5:7].get(['t_ref', 'tau_m'])
C_m = nodes[2:9:2].get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -70.0))
self.assertEqual(g['t_ref'], (2.0, 2.0))
self.assertEqual(C_m, (250.0, 250.0, 250.0, 250.0))
def test_get_composite(self):
"""
Test that get function works on composite NodeCollections
"""
n1 = nest.Create('iaf_psc_alpha', 2)
n2 = nest.Create('iaf_psc_delta', 2)
n3 = nest.Create('iaf_psc_exp')
n4 = nest.Create('iaf_psc_alpha', 3)
n1.set(V_m=[-77., -88.])
n3.set({'V_m': -55.})
n1.set(C_m=[251., 252.])
n2.set(C_m=[253., 254.])
n3.set({'C_m': 255.})
n4.set(C_m=[256., 257., 258.])
n5 = n1 + n2 + n3 + n4
status_dict = n5.get()
# Check that we get values in correct order
vm_ref = (-77., -88., -70., -70., -55, -70., -70., -70.)
self.assertEqual(status_dict['V_m'], vm_ref)
# Check that we get None where not applicable
# tau_syn_ex is part of iaf_psc_alpha
tau_ref = (2., 2., None, None, 2., 2., 2., 2.)
self.assertEqual(status_dict['tau_syn_ex'], tau_ref)
# refractory_input is part of iaf_psc_delta
refrac_ref = (None, None,
False, False,
None, None,
None, None)
self.assertEqual(status_dict['refractory_input'], refrac_ref)
# Check that calling get with string works on composite NCs, both on
# parameters all the models have, and on individual parameters.
Cm_ref = [x * 1. for x in range(251, 259)]
Cm = n5.get('C_m')
self.assertEqual(list(Cm), Cm_ref)
refrac = n5.get('refractory_input')
self.assertEqual(refrac, refrac_ref)
@unittest.skipIf(not HAVE_NUMPY, 'NumPy package is not available')
def test_get_different_size(self):
"""
Test get with different input for different sizes of NodeCollections
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
empty_array_int = np.array([], dtype=np.int64)
# Single node, literal parameter
self.assertEqual(single_sr.get('start'), 0.0)
# Single node, array parameter
self.assertEqual(single_sr.get(['start', 'time_in_steps']),
{'start': 0.0, 'time_in_steps': False})
# Single node, hierarchical with literal parameter
np.testing.assert_array_equal(single_sr.get('events', 'times'),
empty_array_float)
# Multiple nodes, hierarchical with literal parameter
values = multi_sr.get('events', 'times')
for v in values:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, hierarchical with array parameter
values = single_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
np.testing.assert_array_equal(values['senders'], empty_array_int)
np.testing.assert_array_equal(values['times'], empty_array_float)
# Multiple nodes, hierarchical with array parameter
values = multi_sr.get('events', ['senders', 'times'])
self.assertEqual(len(values), 2)
self.assertTrue('senders' in values)
self.assertTrue('times' in values)
self.assertEqual(len(values['senders']), len(multi_sr))
for v in values['senders']:
np.testing.assert_array_equal(v, empty_array_int)
for v in values['times']:
np.testing.assert_array_equal(v, empty_array_float)
# Single node, no parameter (gets all values)
values = single_sr.get()
num_values_single_sr = len(values.keys())
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get()
self.assertEqual(len(values.keys()), num_values_single_sr)
self.assertEqual(values['start'],
tuple(0.0 for i in range(len(multi_sr))))
@unittest.skipIf(not HAVE_PANDAS, 'Pandas package is not available')
def test_get_pandas(self):
"""
Test that get function with Pandas output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
empty_array_float = np.array([], dtype=np.float64)
# Single node, literal parameter
pt.assert_frame_equal(single_sr.get('start', output='pandas'),
pandas.DataFrame({'start': [0.0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, literal parameter
pt.assert_frame_equal(multi_sr.get('start', output='pandas'),
pandas.DataFrame(
{'start': [0.0 for i in range(
len(multi_sr))]},
index=tuple(multi_sr.tolist())))
# Single node, array parameter
pt.assert_frame_equal(single_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame({'start': [0.0],
'n_events': [0]},
index=tuple(single_sr.tolist())))
# Multiple nodes, array parameter
ref_dict = {'start': [0.0 for i in range(len(multi_sr))],
'n_events': [0]}
pt.assert_frame_equal(multi_sr.get(['start', 'n_events'],
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with literal parameter
pt.assert_frame_equal(single_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame({'times': [[]]},
index=tuple(single_sr.tolist())))
# Multiple nodes, hierarchical with literal parameter
ref_dict = {'times': [empty_array_float
for i in range(len(multi_sr))]}
pt.assert_frame_equal(multi_sr.get('events', 'times',
output='pandas'),
pandas.DataFrame(ref_dict,
index=tuple(multi_sr.tolist())))
# Single node, hierarchical with array parameter
ref_df = pandas.DataFrame(
{'times': [[]], 'senders': [[]]}, index=tuple(single_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sr.get(
'events', ['senders', 'times'], output='pandas'),
ref_df)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': [[] for i in range(len(multi_sr))],
'senders': [[] for i in range(len(multi_sr))]}
ref_df = pandas.DataFrame(
ref_dict,
index=tuple(multi_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
sr_df = multi_sr.get('events', ['senders', 'times'], output='pandas')
sr_df = sr_df.reindex(sorted(sr_df.columns), axis=1)
pt.assert_frame_equal(sr_df,
ref_df)
# Single node, no parameter (gets all values)
values = single_sr.get(output='pandas')
num_values_single_sr = values.shape[1]
self.assertEqual(values['start'][tuple(single_sr.tolist())[0]], 0.0)
# Multiple nodes, no parameter (gets all values)
values = multi_sr.get(output='pandas')
self.assertEqual(values.shape, (len(multi_sr), num_values_single_sr))
pt.assert_series_equal(values['start'],
pandas.Series({key: 0.0
for key in tuple(multi_sr.tolist())},
dtype=np.float64,
name='start'))
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [[31.8, 36.1, 38.5]],
'senders': [[17, 12, 20]]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(single_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(single_sr.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
ref_df = pandas.DataFrame(ref_dict, index=tuple(multi_sr.tolist()))
ref_df = ref_df.reindex(sorted(ref_df.columns), axis=1)
pt.assert_frame_equal(multi_sr.get('events', ['senders', 'times'],
output='pandas'),
ref_df)
def test_get_JSON(self):
"""
Test that get function with json output works as expected.
"""
single_sr = nest.Create('spike_recorder', 1)
multi_sr = nest.Create('spike_recorder', 10)
# Single node, literal parameter
self.assertEqual(json.loads(
single_sr.get('start', output='json')), 0.0)
# Multiple nodes, literal parameter
self.assertEqual(
json.loads(multi_sr.get('start', output='json')),
len(multi_sr) * [0.0])
# Single node, array parameter
ref_dict = {'start': 0.0, 'n_events': 0}
self.assertEqual(
json.loads(single_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Multiple nodes, array parameter
ref_dict = {'start': len(multi_sr) * [0.0],
'n_events': len(multi_sr) * [0]}
self.assertEqual(
json.loads(multi_sr.get(['start', 'n_events'], output='json')),
ref_dict)
# Single node, hierarchical with literal parameter
self.assertEqual(json.loads(single_sr.get(
'events', 'times', output='json')), [])
# Multiple nodes, hierarchical with literal parameter
ref_list = len(multi_sr) * [[]]
self.assertEqual(
json.loads(multi_sr.get('events', 'times', output='json')),
ref_list)
# Single node, hierarchical with array parameter
ref_dict = {'senders': [], 'times': []}
self.assertEqual(
json.loads(single_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Multiple nodes, hierarchical with array parameter
ref_dict = {'times': len(multi_sr) * [[]],
'senders': len(multi_sr) * [[]]}
self.assertEqual(
json.loads(multi_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
# Single node, no parameter (gets all values)
values = json.loads(single_sr.get(output='json'))
num_values_single_sr = len(values)
self.assertEqual(values['start'], 0.0)
# Multiple nodes, no parameter (gets all values)
values = json.loads(multi_sr.get(output='json'))
self.assertEqual(len(values), num_values_single_sr)
self.assertEqual(values['start'], len(multi_sr) * [0.0])
# With data in events
nodes = nest.Create('iaf_psc_alpha', 10)
pg = nest.Create('poisson_generator', {'rate': 70000.0})
nest.Connect(pg, nodes)
nest.Connect(nodes, single_sr)
nest.Connect(nodes, multi_sr, 'one_to_one')
nest.Simulate(39)
ref_dict = {'times': [31.8, 36.1, 38.5],
'senders': [17, 12, 20]}
self.assertEqual(
json.loads(single_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
ref_dict = {'times': [[36.1], [], [], [], [], [31.8], [], [], [38.5],
[]],
'senders': [[12], [], [], [], [], [17], [], [], [20], []]}
self.assertEqual(
json.loads(multi_sr.get(
'events', ['senders', 'times'], output='json')),
ref_dict)
def test_set(self):
"""
Test that set function works as expected.
"""
nodes = nest.Create('iaf_psc_alpha', 10)
# Dict to set same value for all nodes.
nodes.set({'C_m': 100.0})
C_m = nodes.get('C_m')
self.assertEqual(C_m, (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
# Set same value for all nodes.
nodes.set(tau_Ca=500.0)
tau_Ca = nodes.get('tau_Ca')
self.assertEqual(tau_Ca, (500.0, 500.0, 500.0, 500.0, 500.0,
500.0, 500.0, 500.0, 500.0, 500.0))
# List of dicts, where each dict corresponds to a single node.
nodes.set(({'V_m': 10.0}, {'V_m': 20.0}, {'V_m': 30.0}, {'V_m': 40.0},
{'V_m': 50.0}, {'V_m': 60.0}, {'V_m': 70.0}, {'V_m': 80.0},
{'V_m': 90.0}, {'V_m': -100.0}))
V_m = nodes.get('V_m')
self.assertEqual(V_m, (10.0, 20.0, 30.0, 40.0, 50.0,
60.0, 70.0, 80.0, 90.0, -100.0))
        # Set value of a parameter from a list. The list must have the same length as the NodeCollection.
nodes.set(V_reset=[-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.])
V_reset = nodes.get('V_reset')
self.assertEqual(V_reset, (-85., -82., -80., -77., -75.,
-72., -70., -67., -65., -62.))
with self.assertRaises(IndexError):
nodes.set(V_reset=[-85., -82., -80., -77., -75.])
# Set different parameters with a dictionary.
nodes.set({'t_ref': 44.0, 'tau_m': 2.0, 'tau_minus': 42.0})
g = nodes.get(['t_ref', 'tau_m', 'tau_minus'])
self.assertEqual(g['t_ref'], (44.0, 44.0, 44.0, 44.0, 44.0,
44.0, 44.0, 44.0, 44.0, 44.0))
self.assertEqual(g['tau_m'], (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
self.assertEqual(g['tau_minus'], (42.0, 42.0, 42.0, 42.0, 42.0,
42.0, 42.0, 42.0, 42.0, 42.0))
with self.assertRaises(nest.kernel.NESTError):
nodes.set({'vp': 2})
def test_set_composite(self):
"""
Test that set works on composite NodeCollections
"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes[2:5].set(({'V_m': -50.0}, {'V_m': -40.0}, {'V_m': -30.0}))
nodes[5:7].set({'t_ref': 4.4, 'tau_m': 3.0})
nodes[2:9:2].set(C_m=111.0)
V_m = nodes.get('V_m')
g = nodes.get(['t_ref', 'tau_m'])
C_m = nodes.get('C_m')
self.assertEqual(V_m, (-70.0, -70.0, -50.0, -40.0, -30.0,
-70.0, -70.0, -70.0, -70.0, -70.0,))
self.assertEqual(g, {'t_ref': (2.0, 2.0, 2.0, 2.0, 2.0,
4.4, 4.4, 2.0, 2.0, 2.0),
'tau_m': (10.0, 10.0, 10.0, 10.0, 10.0,
3.00, 3.00, 10.0, 10.0, 10.0)})
self.assertEqual(C_m, (250.0, 250.0, 111.0, 250.0, 111.0,
250.0, 111.0, 250.0, 111.0, 250.0))
def test_get_attribute(self):
"""Test get using getattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
self.assertEqual(nodes.C_m, (250.0, 250.0, 250.0, 250.0, 250.0,
250.0, 250.0, 250.0, 250.0, 250.0))
self.assertEqual(nodes.global_id, tuple(range(1, 11)))
self.assertEqual(nodes.E_L, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.V_m, (-70.0, -70.0, -70.0, -70.0, -70.0,
-70.0, -70.0, -70.0, -70.0, -70.0))
self.assertEqual(nodes.t_ref, (2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0))
with self.assertRaises(KeyError):
print(nodes.nonexistent_attribute)
self.assertIsNone(nodes.spatial)
spatial_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([2, 2]))
self.assertIsNotNone(spatial_nodes.spatial)
spatial_reference = {'network_size': 4,
'center': (0.0, 0.0),
'edge_wrap': False,
'extent': (1.0, 1.0),
'shape': (2, 2)}
self.assertEqual(spatial_nodes.spatial, spatial_reference)
def test_set_attribute(self):
"""Test set using setattr"""
nodes = nest.Create('iaf_psc_alpha', 10)
nodes.C_m = 100.0
self.assertEqual(nodes.get('C_m'), (100.0, 100.0, 100.0, 100.0, 100.0,
100.0, 100.0, 100.0, 100.0, 100.0))
v_reset_reference = (-85., -82., -80., -77., -75., -72., -70., -67., -65., -62.)
nodes.V_reset = v_reset_reference
self.assertEqual(nodes.get('V_reset'), v_reset_reference)
with self.assertRaises(IndexError):
nodes.V_reset = [-85., -82., -80., -77., -75.]
with self.assertRaises(nest.kernel.NESTError):
nodes.nonexistent_attribute = 1.
def suite():
suite = unittest.makeSuite(TestNodeCollectionGetSet, 'test')
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
| gpl-2.0 |
bthirion/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
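# A note on the settings below (as described in the Birch documentation): threshold=1.7
# caps the radius of a subcluster after merging in a new sample, n_clusters=None keeps
# the raw subclusters, and n_clusters=100 runs the final (global) clustering step on them.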
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
    time_ = time() - t
    print("Birch %s as the final step took %0.2f seconds" % (
          info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
3manuek/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
rubikloud/scikit-learn | sklearn/neighbors/nearest_centroid.py | 38 | 7356 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_X_y, check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric: string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as it is easier to calculate the median.
if self.metric == 'manhattan':
X, y = check_X_y(X, y, ['csc'])
else:
X, y = check_X_y(X, y, ['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
        # Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self, 'centroids_')
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
lbishal/scikit-learn | examples/mixture/plot_gmm_selection.py | 36 | 3271 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
karllessard/tensorflow | tensorflow/python/autograph/core/config.py | 11 | 1959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.core import config_lib
Action = config_lib.Action
Convert = config_lib.Convert
DoNotConvert = config_lib.DoNotConvert
# This list is evaluated in order and stops at the first rule that tests True
# for a definitely_convert or definitely_bypass call.
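# For example: under the rules below (which match on module-name prefixes), a function
# defined in `numpy.linalg` falls under DoNotConvert('numpy') and is left untouched,
# while one defined in `tensorflow.python.training.experimental` is converted.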
CONVERSION_RULES = (
# Known packages
Convert('tensorflow.python.training.experimental'),
# Builtin modules
DoNotConvert('collections'),
DoNotConvert('copy'),
DoNotConvert('cProfile'),
DoNotConvert('inspect'),
DoNotConvert('ipdb'),
DoNotConvert('linecache'),
DoNotConvert('mock'),
DoNotConvert('pathlib'),
DoNotConvert('pdb'),
DoNotConvert('posixpath'),
DoNotConvert('pstats'),
DoNotConvert('re'),
DoNotConvert('threading'),
DoNotConvert('urllib'),
# Known libraries
DoNotConvert('matplotlib'),
DoNotConvert('numpy'),
DoNotConvert('pandas'),
DoNotConvert('tensorflow'),
DoNotConvert('PIL'),
# TODO(b/133417201): Remove.
DoNotConvert('tensorflow_probability'),
# TODO(b/133842282): Remove.
DoNotConvert('tensorflow_datasets.core'),
)
| apache-2.0 |
zonemercy/Kaggle | quora/solution/utils/skl_utils.py | 2 | 6124 | # -*- coding: utf-8 -*-
"""
@author: Chenglong Chen <[email protected]>
@brief: utils for scikit-learn models
"""
import numpy as np
import sklearn.svm
import sklearn.neighbors
import sklearn.ensemble
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from . import dist_utils
class SVR:
def __init__(self, kernel='rbf', degree=3, gamma='auto', C=1.0,
epsilon=0.1, normalize=True, cache_size=2048):
        svr = sklearn.svm.SVR(kernel=kernel, degree=degree,
                              gamma=gamma, C=C, epsilon=epsilon,
                              cache_size=cache_size)
if normalize:
self.model = Pipeline([('ss', StandardScaler()), ('svr', svr)])
else:
self.model = svr
def __str__(self):
return "SVR"
def fit(self, X, y):
self.model.fit(X, y)
return self
def predict(self, X):
y_pred = self.model.predict(X)
return y_pred
class LinearSVR:
def __init__(self, epsilon=0.0, C=1.0, loss='epsilon_insensitive',
random_state=None, normalize=True):
lsvr = sklearn.svm.LinearSVR(epsilon=epsilon, C=C,
loss=loss, random_state=random_state)
if normalize:
self.model = Pipeline([('ss', StandardScaler()), ('lsvr', lsvr)])
else:
self.model = lsvr
def __str__(self):
return "LinearSVR"
def fit(self, X, y):
self.model.fit(X, y)
return self
def predict(self, X):
y_pred = self.model.predict(X)
return y_pred
class KNNRegressor:
def __init__(self, n_neighbors=5, weights='uniform', leaf_size=30,
metric='minkowski', normalize=True):
if metric == 'cosine':
metric = lambda x,y: dist_utils._cosine_sim(x, y)
knn = sklearn.neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights,
leaf_size=leaf_size, metric=metric)
if normalize:
self.model = Pipeline([('ss', StandardScaler()), ('knn', knn)])
else:
self.model = knn
def __str__(self):
return "KNNRegressor"
def fit(self, X, y):
self.model.fit(X, y)
return self
def predict(self, X):
y_pred = self.model.predict(X)
return y_pred
class AdaBoostRegressor:
def __init__(self, base_estimator=None, n_estimators=50, max_features=1.0,
max_depth=6, learning_rate=1.0, loss='linear', random_state=None):
if base_estimator and base_estimator == 'etr':
base_estimator = ExtraTreeRegressor(max_depth=max_depth,
max_features=max_features)
else:
base_estimator = DecisionTreeRegressor(max_depth=max_depth,
max_features=max_features)
self.model = sklearn.ensemble.AdaBoostRegressor(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state,
loss=loss)
def __str__(self):
return "AdaBoostRegressor"
def fit(self, X, y):
self.model.fit(X, y)
return self
def predict(self, X):
y_pred = self.model.predict(X)
return y_pred
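# RandomRidge below is a simple bagging-style ensemble: each member is a Ridge
# regression fit on a random subset of features and an (optionally bootstrapped)
# random subset of samples, with an optional degree-2 polynomial expansion;
# the predictions of all members are averaged.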
class RandomRidge:
def __init__(self, alpha=1.0, normalize=True, poly=False,
n_estimators=10, max_features=1.0,
bootstrap=True, subsample=1.0,
random_state=2016):
self.alpha = alpha
self.normalize = normalize
self.poly = poly
self.n_estimators = n_estimators
if isinstance(max_features, float):
assert max_features > 0 and max_features <= 1
self.max_features = max_features
self.bootstrap = bootstrap
assert subsample > 0 and subsample <= 1
self.subsample = subsample
self.random_state = random_state
self.ridge_list = [0]*self.n_estimators
self.feature_idx_list = [0]*self.n_estimators
def __str__(self):
return "RandomRidge"
def _random_feature_idx(self, fdim, random_state):
rng = np.random.RandomState(random_state)
if isinstance(self.max_features, int):
size = min(fdim, self.max_features)
else:
size = int(fdim * self.max_features)
idx = rng.permutation(fdim)[:size]
return idx
def _random_sample_idx(self, sdim, random_state):
rng = np.random.RandomState(random_state)
size = int(sdim * self.subsample)
if self.bootstrap:
idx = rng.randint(sdim, size=size)
else:
idx = rng.permutation(sdim)[:size]
return idx
def fit(self, X, y):
sdim, fdim = X.shape
for i in range(self.n_estimators):
ridge = Ridge(alpha=self.alpha, normalize=self.normalize, random_state=self.random_state)
fidx = self._random_feature_idx(fdim, self.random_state+i*100)
sidx = self._random_sample_idx(sdim, self.random_state+i*10)
X_tmp = X[sidx][:,fidx]
if self.poly:
X_tmp = PolynomialFeatures(degree=2).fit_transform(X_tmp)[:,1:]
ridge.fit(X_tmp, y[sidx])
self.ridge_list[i] = ridge
self.feature_idx_list[i] = fidx
return self
def predict(self, X):
y_pred = np.zeros((X.shape[0], self.n_estimators))
for i in range(self.n_estimators):
fidx = self.feature_idx_list[i]
ridge = self.ridge_list[i]
X_tmp = X[:,fidx]
if self.poly:
X_tmp = PolynomialFeatures(degree=2).fit_transform(X_tmp)[:,1:]
y_pred[:,i] = ridge.predict(X_tmp)
y_pred = np.mean(y_pred, axis=1)
return y_pred
| mit |
wzhfy/spark | dev/sparktestsupport/modules.py | 3 | 17852 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from functools import total_ordering
import itertools
import re
all_modules = []
@total_ordering
class Module(object):
"""
A module is the basic abstraction in our test runner script. Each module consists of a set
of source files, a set of test commands, and a set of dependencies on other modules. We use
modules to define a dependency graph that let us determine which tests to run based on which
files have changed.
"""
def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(), environ={},
sbt_test_goals=(), python_test_goals=(), excluded_python_implementations=(),
test_tags=(), should_run_r_tests=False, should_run_build_tests=False):
"""
Define a new module.
:param name: A short module name, for display in logging and error messages.
:param dependencies: A set of dependencies for this module. This should only include direct
dependencies; transitive dependencies are resolved automatically.
:param source_file_regexes: a set of regexes that match source files belonging to this
module. These regexes are applied by attempting to match at the beginning of the
filename strings.
:param build_profile_flags: A set of profile flags that should be passed to Maven or SBT in
order to build and test this module (e.g. '-PprofileName').
:param environ: A dict of environment variables that should be set when files in this
module are changed.
:param sbt_test_goals: A set of SBT test goals for testing this module.
:param python_test_goals: A set of Python test goals for testing this module.
:param excluded_python_implementations: A set of Python implementations that are not
supported by this module's Python components. The values in this set should match
strings returned by Python's `platform.python_implementation()`.
        :param test_tags: A set of tags that will be excluded when running unit tests if the module
is not explicitly changed.
:param should_run_r_tests: If true, changes in this module will trigger all R tests.
:param should_run_build_tests: If true, changes in this module will trigger build tests.
"""
self.name = name
self.dependencies = dependencies
self.source_file_prefixes = source_file_regexes
self.sbt_test_goals = sbt_test_goals
self.build_profile_flags = build_profile_flags
self.environ = environ
self.python_test_goals = python_test_goals
self.excluded_python_implementations = excluded_python_implementations
self.test_tags = test_tags
self.should_run_r_tests = should_run_r_tests
self.should_run_build_tests = should_run_build_tests
self.dependent_modules = set()
for dep in dependencies:
dep.dependent_modules.add(self)
all_modules.append(self)
def contains_file(self, filename):
return any(re.match(p, filename) for p in self.source_file_prefixes)
def __repr__(self):
return "Module<%s>" % self.name
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not (self.name == other.name)
def __hash__(self):
return hash(self.name)
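# Note: `dependencies` is declared explicitly per module, while `dependent_modules`
# is filled in automatically in Module.__init__, so the dependency graph can be
# walked in both directions when deciding which tests a changed file should trigger.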
tags = Module(
name="tags",
dependencies=[],
source_file_regexes=[
"common/tags/",
]
)
kvstore = Module(
name="kvstore",
dependencies=[tags],
source_file_regexes=[
"common/kvstore/",
],
sbt_test_goals=[
"kvstore/test",
],
)
network_common = Module(
name="network-common",
dependencies=[tags],
source_file_regexes=[
"common/network-common/",
],
sbt_test_goals=[
"network-common/test",
],
)
network_shuffle = Module(
name="network-shuffle",
dependencies=[tags],
source_file_regexes=[
"common/network-shuffle/",
],
sbt_test_goals=[
"network-shuffle/test",
],
)
unsafe = Module(
name="unsafe",
dependencies=[tags],
source_file_regexes=[
"common/unsafe",
],
sbt_test_goals=[
"unsafe/test",
],
)
launcher = Module(
name="launcher",
dependencies=[tags],
source_file_regexes=[
"launcher/",
],
sbt_test_goals=[
"launcher/test",
],
)
core = Module(
name="core",
dependencies=[kvstore, network_common, network_shuffle, unsafe, launcher],
source_file_regexes=[
"core/",
],
sbt_test_goals=[
"core/test",
],
)
catalyst = Module(
name="catalyst",
dependencies=[tags, core],
source_file_regexes=[
"sql/catalyst/",
],
sbt_test_goals=[
"catalyst/test",
],
)
sql = Module(
name="sql",
dependencies=[catalyst],
source_file_regexes=[
"sql/core/",
],
sbt_test_goals=[
"sql/test",
],
)
hive = Module(
name="hive",
dependencies=[sql],
source_file_regexes=[
"sql/hive/",
"bin/spark-sql",
],
build_profile_flags=[
"-Phive",
],
sbt_test_goals=[
"hive/test",
],
test_tags=[
"org.apache.spark.tags.ExtendedHiveTest"
]
)
repl = Module(
name="repl",
dependencies=[hive],
source_file_regexes=[
"repl/",
],
sbt_test_goals=[
"repl/test",
],
)
hive_thriftserver = Module(
name="hive-thriftserver",
dependencies=[hive],
source_file_regexes=[
"sql/hive-thriftserver",
"sbin/start-thriftserver.sh",
],
build_profile_flags=[
"-Phive-thriftserver",
],
sbt_test_goals=[
"hive-thriftserver/test",
]
)
avro = Module(
name="avro",
dependencies=[sql],
source_file_regexes=[
"external/avro",
],
sbt_test_goals=[
"avro/test",
]
)
sql_kafka = Module(
name="sql-kafka-0-10",
dependencies=[sql],
source_file_regexes=[
"external/kafka-0-10-sql",
],
sbt_test_goals=[
"sql-kafka-0-10/test",
]
)
sketch = Module(
name="sketch",
dependencies=[tags],
source_file_regexes=[
"common/sketch/",
],
sbt_test_goals=[
"sketch/test"
]
)
graphx = Module(
name="graphx",
dependencies=[tags, core],
source_file_regexes=[
"graphx/",
],
sbt_test_goals=[
"graphx/test"
]
)
streaming = Module(
name="streaming",
dependencies=[tags, core],
source_file_regexes=[
"streaming",
],
sbt_test_goals=[
"streaming/test",
]
)
# Don't set the dependencies because changes in other modules should not trigger Kinesis tests.
# Kinesis tests depends on external Amazon kinesis service. We should run these tests only when
# files in streaming_kinesis_asl are changed, so that if Kinesis experiences an outage, we don't
# fail other PRs.
streaming_kinesis_asl = Module(
name="streaming-kinesis-asl",
dependencies=[tags, core],
source_file_regexes=[
"external/kinesis-asl/",
"external/kinesis-asl-assembly/",
],
build_profile_flags=[
"-Pkinesis-asl",
],
environ={
"ENABLE_KINESIS_TESTS": "1"
},
sbt_test_goals=[
"streaming-kinesis-asl/test",
]
)
streaming_kafka_0_10 = Module(
name="streaming-kafka-0-10",
dependencies=[streaming, core],
source_file_regexes=[
# The ending "/" is necessary otherwise it will include "sql-kafka" codes
"external/kafka-0-10/",
"external/kafka-0-10-assembly",
"external/kafka-0-10-token-provider",
],
sbt_test_goals=[
"streaming-kafka-0-10/test",
"token-provider-kafka-0-10/test"
]
)
mllib_local = Module(
name="mllib-local",
dependencies=[tags, core],
source_file_regexes=[
"mllib-local",
],
sbt_test_goals=[
"mllib-local/test",
]
)
mllib = Module(
name="mllib",
dependencies=[mllib_local, streaming, sql],
source_file_regexes=[
"data/mllib/",
"mllib/",
],
sbt_test_goals=[
"mllib/test",
]
)
examples = Module(
name="examples",
dependencies=[graphx, mllib, streaming, hive],
source_file_regexes=[
"examples/",
],
sbt_test_goals=[
"examples/test",
]
)
pyspark_core = Module(
name="pyspark-core",
dependencies=[core],
source_file_regexes=[
"python/(?!pyspark/(ml|mllib|sql|streaming))"
],
python_test_goals=[
# doctests
"pyspark.rdd",
"pyspark.context",
"pyspark.conf",
"pyspark.broadcast",
"pyspark.accumulators",
"pyspark.serializers",
"pyspark.profiler",
"pyspark.shuffle",
"pyspark.util",
# unittests
"pyspark.tests.test_appsubmit",
"pyspark.tests.test_broadcast",
"pyspark.tests.test_conf",
"pyspark.tests.test_context",
"pyspark.tests.test_daemon",
"pyspark.tests.test_join",
"pyspark.tests.test_profiler",
"pyspark.tests.test_rdd",
"pyspark.tests.test_rddbarrier",
"pyspark.tests.test_readwrite",
"pyspark.tests.test_serializers",
"pyspark.tests.test_shuffle",
"pyspark.tests.test_taskcontext",
"pyspark.tests.test_util",
"pyspark.tests.test_worker",
]
)
pyspark_sql = Module(
name="pyspark-sql",
dependencies=[pyspark_core, hive, avro],
source_file_regexes=[
"python/pyspark/sql"
],
python_test_goals=[
# doctests
"pyspark.sql.types",
"pyspark.sql.context",
"pyspark.sql.session",
"pyspark.sql.conf",
"pyspark.sql.catalog",
"pyspark.sql.column",
"pyspark.sql.dataframe",
"pyspark.sql.group",
"pyspark.sql.functions",
"pyspark.sql.readwriter",
"pyspark.sql.streaming",
"pyspark.sql.udf",
"pyspark.sql.window",
"pyspark.sql.avro.functions",
"pyspark.sql.pandas.conversion",
"pyspark.sql.pandas.map_ops",
"pyspark.sql.pandas.group_ops",
"pyspark.sql.pandas.types",
"pyspark.sql.pandas.serializers",
"pyspark.sql.pandas.typehints",
"pyspark.sql.pandas.utils",
# unittests
"pyspark.sql.tests.test_arrow",
"pyspark.sql.tests.test_catalog",
"pyspark.sql.tests.test_column",
"pyspark.sql.tests.test_conf",
"pyspark.sql.tests.test_context",
"pyspark.sql.tests.test_dataframe",
"pyspark.sql.tests.test_datasources",
"pyspark.sql.tests.test_functions",
"pyspark.sql.tests.test_group",
"pyspark.sql.tests.test_pandas_cogrouped_map",
"pyspark.sql.tests.test_pandas_grouped_map",
"pyspark.sql.tests.test_pandas_map",
"pyspark.sql.tests.test_pandas_udf",
"pyspark.sql.tests.test_pandas_udf_grouped_agg",
"pyspark.sql.tests.test_pandas_udf_scalar",
"pyspark.sql.tests.test_pandas_udf_typehints",
"pyspark.sql.tests.test_pandas_udf_window",
"pyspark.sql.tests.test_readwriter",
"pyspark.sql.tests.test_serde",
"pyspark.sql.tests.test_session",
"pyspark.sql.tests.test_streaming",
"pyspark.sql.tests.test_types",
"pyspark.sql.tests.test_udf",
"pyspark.sql.tests.test_utils",
]
)
pyspark_resource = Module(
name="pyspark-resource",
dependencies=[
pyspark_core
],
source_file_regexes=[
"python/pyspark/resource"
],
python_test_goals=[
# unittests
"pyspark.resource.tests.test_resources",
]
)
pyspark_streaming = Module(
name="pyspark-streaming",
dependencies=[
pyspark_core,
streaming,
streaming_kinesis_asl
],
source_file_regexes=[
"python/pyspark/streaming"
],
python_test_goals=[
# doctests
"pyspark.streaming.util",
# unittests
"pyspark.streaming.tests.test_context",
"pyspark.streaming.tests.test_dstream",
"pyspark.streaming.tests.test_kinesis",
"pyspark.streaming.tests.test_listener",
]
)
pyspark_mllib = Module(
name="pyspark-mllib",
dependencies=[pyspark_core, pyspark_streaming, pyspark_sql, mllib],
source_file_regexes=[
"python/pyspark/mllib"
],
python_test_goals=[
# doctests
"pyspark.mllib.classification",
"pyspark.mllib.clustering",
"pyspark.mllib.evaluation",
"pyspark.mllib.feature",
"pyspark.mllib.fpm",
"pyspark.mllib.linalg.__init__",
"pyspark.mllib.linalg.distributed",
"pyspark.mllib.random",
"pyspark.mllib.recommendation",
"pyspark.mllib.regression",
"pyspark.mllib.stat._statistics",
"pyspark.mllib.stat.KernelDensity",
"pyspark.mllib.tree",
"pyspark.mllib.util",
# unittests
"pyspark.mllib.tests.test_algorithms",
"pyspark.mllib.tests.test_feature",
"pyspark.mllib.tests.test_linalg",
"pyspark.mllib.tests.test_stat",
"pyspark.mllib.tests.test_streaming_algorithms",
"pyspark.mllib.tests.test_util",
],
excluded_python_implementations=[
"PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
]
)
pyspark_ml = Module(
name="pyspark-ml",
dependencies=[pyspark_core, pyspark_mllib],
source_file_regexes=[
"python/pyspark/ml/"
],
python_test_goals=[
# doctests
"pyspark.ml.classification",
"pyspark.ml.clustering",
"pyspark.ml.evaluation",
"pyspark.ml.feature",
"pyspark.ml.fpm",
"pyspark.ml.functions",
"pyspark.ml.image",
"pyspark.ml.linalg.__init__",
"pyspark.ml.recommendation",
"pyspark.ml.regression",
"pyspark.ml.stat",
"pyspark.ml.tuning",
# unittests
"pyspark.ml.tests.test_algorithms",
"pyspark.ml.tests.test_base",
"pyspark.ml.tests.test_evaluation",
"pyspark.ml.tests.test_feature",
"pyspark.ml.tests.test_image",
"pyspark.ml.tests.test_linalg",
"pyspark.ml.tests.test_param",
"pyspark.ml.tests.test_persistence",
"pyspark.ml.tests.test_pipeline",
"pyspark.ml.tests.test_stat",
"pyspark.ml.tests.test_training_summary",
"pyspark.ml.tests.test_tuning",
"pyspark.ml.tests.test_wrapper",
],
excluded_python_implementations=[
"PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
]
)
sparkr = Module(
name="sparkr",
dependencies=[hive, mllib],
source_file_regexes=[
"R/",
],
should_run_r_tests=True
)
docs = Module(
name="docs",
dependencies=[],
source_file_regexes=[
"docs/",
]
)
build = Module(
name="build",
dependencies=[],
source_file_regexes=[
".*pom.xml",
"dev/test-dependencies.sh",
],
should_run_build_tests=True
)
yarn = Module(
name="yarn",
dependencies=[],
source_file_regexes=[
"resource-managers/yarn/",
"common/network-yarn/",
],
build_profile_flags=["-Pyarn"],
sbt_test_goals=[
"yarn/test",
"network-yarn/test",
],
test_tags=[
"org.apache.spark.tags.ExtendedYarnTest"
]
)
mesos = Module(
name="mesos",
dependencies=[],
source_file_regexes=["resource-managers/mesos/"],
build_profile_flags=["-Pmesos"],
sbt_test_goals=["mesos/test"]
)
kubernetes = Module(
name="kubernetes",
dependencies=[],
source_file_regexes=["resource-managers/kubernetes"],
build_profile_flags=["-Pkubernetes"],
sbt_test_goals=["kubernetes/test"]
)
hadoop_cloud = Module(
name="hadoop-cloud",
dependencies=[],
source_file_regexes=["hadoop-cloud"],
build_profile_flags=["-Phadoop-cloud"],
sbt_test_goals=["hadoop-cloud/test"]
)
spark_ganglia_lgpl = Module(
name="spark-ganglia-lgpl",
dependencies=[],
build_profile_flags=["-Pspark-ganglia-lgpl"],
source_file_regexes=[
"external/spark-ganglia-lgpl",
]
)
# The root module is a dummy module which is used to run all of the tests.
# No other modules should directly depend on this module.
root = Module(
name="root",
dependencies=[build, core], # Changes to build should trigger all tests.
source_file_regexes=[],
# In order to run all of the tests, enable every test profile:
build_profile_flags=list(set(
itertools.chain.from_iterable(m.build_profile_flags for m in all_modules))),
sbt_test_goals=[
"test",
],
python_test_goals=list(itertools.chain.from_iterable(m.python_test_goals for m in all_modules)),
should_run_r_tests=True,
should_run_build_tests=True
)
| apache-2.0 |
mdeger/nest-simulator | pynest/examples/structural_plasticity.py | 13 | 13366 | # -*- coding: utf-8 -*-
#
# structural_plasticity.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Structural Plasticity example
-----------------------------
This example shows a simple network of two populations where structural
plasticity is used. The network has 1000 neurons, 80% excitatory and
20% inhibitory. The simulation starts without any connectivity. A set of
homeostatic rules are defined, according to which structural plasticity will
create and delete synapses dynamically during the simulation until a desired
level of electrical activity is reached. The model of structural plasticity
used here corresponds to the formulation presented in Butz, M., & van Ooyen, A.
(2013). A simple rule for dendritic spine and axonal bouton formation can
account for cortical reorganization after focal retinal lesions.
PLoS Comput. Biol. 9 (10), e1003259.
At the end of the simulation, a plot of the evolution of the connectivity
in the network and the average calcium concentration in the neurons is created.
'''
import nest
import numpy
import matplotlib.pyplot as pl
import sys
'''
First, we have imported all necessary modules.
'''
class StructuralPlasticityExample:
def __init__(self):
'''
We define general simulation parameters
'''
# simulated time (ms)
self.t_sim = 200000.0
# simulation step (ms).
self.dt = 0.1
self.number_excitatory_neurons = 800
self.number_inhibitory_neurons = 200
# Structural_plasticity properties
self.update_interval = 1000
self.record_interval = 1000.0
# rate of background Poisson input
self.bg_rate = 10000.0
self.neuron_model = 'iaf_psc_exp'
'''
In this implementation of structural plasticity, neurons grow
connection points called synaptic elements. Synapses can be created
between compatible synaptic elements. The growth of these elements is
guided by homeostatic rules, defined as growth curves.
Here we specify the growth curves for synaptic elements of excitatory
and inhibitory neurons.
'''
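        # Roughly speaking, with the 'gaussian' growth curve new synaptic elements
        # are created while the neuron's calcium trace lies between 'eta' and 'eps'
        # and are deleted outside that range, so 'eps' acts as the homeostatic set
        # point (cf. Butz & van Ooyen, 2013).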
# Excitatory synaptic elements of excitatory neurons
self.growth_curve_e_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.05, # Ca2+
}
# Inhibitory synaptic elements of excitatory neurons
self.growth_curve_e_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_e_e['eps'], # Ca2+
}
# Excitatory synaptic elements of inhibitory neurons
self.growth_curve_i_e = {
'growth_curve': "gaussian",
'growth_rate': 0.0004, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': 0.2, # Ca2+
}
# Inhibitory synaptic elements of inhibitory neurons
self.growth_curve_i_i = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # (elements/ms)
'continuous': False,
'eta': 0.0, # Ca2+
'eps': self.growth_curve_i_e['eps'] # Ca2+
}
'''
Now we specify the neuron model.
'''
self.model_params = {'tau_m': 10.0, # membrane time constant (ms)
# excitatory synaptic time constant (ms)
'tau_syn_ex': 0.5,
# inhibitory synaptic time constant (ms)
'tau_syn_in': 0.5,
't_ref': 2.0, # absolute refractory period (ms)
'E_L': -65.0, # resting membrane potential (mV)
'V_th': -50.0, # spike threshold (mV)
'C_m': 250.0, # membrane capacitance (pF)
'V_reset': -65.0 # reset potential (mV)
}
self.nodes_e = None
self.nodes_i = None
self.mean_ca_e = []
self.mean_ca_i = []
self.total_connections_e = []
self.total_connections_i = []
'''
We initialize variables for the post-synaptic currents of the
excitatory, inhibitory and external synapses. These values were
calculated from a PSP amplitude of 1 for excitatory synapses,
-1 for inhibitory synapses and 0.11 for external synapses.
'''
self.psc_e = 585.0
self.psc_i = -585.0
self.psc_ext = 6.2
def prepare_simulation(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
'''
We set global kernel parameters. Here we define the resolution
for the simulation, which is also the time resolution for the update
of the synaptic elements.
'''
nest.SetKernelStatus(
{
'resolution': self.dt
}
)
'''
        Set the structural plasticity synaptic update interval, which defines how
        often the connectivity will be updated inside the network. It is important
to notice that synaptic elements and connections change on different
time scales.
'''
nest.SetStructuralPlasticityStatus({
'structural_plasticity_update_interval': self.update_interval,
})
'''
Now we define Structural Plasticity synapses. In this example we create
two synapse models, one for excitatory and one for inhibitory synapses.
Then we define that excitatory synapses can only be created between a
pre synaptic element called 'Axon_ex' and a post synaptic element
called Den_ex. In a similar manner, synaptic elements for inhibitory
synapses are defined.
'''
nest.CopyModel('static_synapse', 'synapse_ex')
nest.SetDefaults('synapse_ex', {'weight': self.psc_e, 'delay': 1.0})
nest.CopyModel('static_synapse', 'synapse_in')
nest.SetDefaults('synapse_in', {'weight': self.psc_i, 'delay': 1.0})
nest.SetStructuralPlasticityStatus({
'structural_plasticity_synapses': {
'synapse_ex': {
'model': 'synapse_ex',
'post_synaptic_element': 'Den_ex',
'pre_synaptic_element': 'Axon_ex',
},
'synapse_in': {
'model': 'synapse_in',
'post_synaptic_element': 'Den_in',
'pre_synaptic_element': 'Axon_in',
},
}
})
def create_nodes(self):
'''
Now we assign the growth curves to the corresponding synaptic elements
'''
synaptic_elements = {
'Den_ex': self.growth_curve_e_e,
'Den_in': self.growth_curve_e_i,
'Axon_ex': self.growth_curve_e_e,
}
synaptic_elements_i = {
'Den_ex': self.growth_curve_i_e,
'Den_in': self.growth_curve_i_i,
'Axon_in': self.growth_curve_i_i,
}
'''
        Then it is time to create two populations: one holding 80% of the total
        network size as excitatory neurons and another one holding the remaining
        20% as inhibitory neurons.
'''
self.nodes_e = nest.Create('iaf_psc_alpha',
self.number_excitatory_neurons,
{'synaptic_elements': synaptic_elements})
self.nodes_i = nest.Create('iaf_psc_alpha',
self.number_inhibitory_neurons,
{'synaptic_elements': synaptic_elements_i})
nest.SetStatus(self.nodes_e, 'synaptic_elements', synaptic_elements)
nest.SetStatus(self.nodes_i, 'synaptic_elements', synaptic_elements_i)
def connect_external_input(self):
'''
We create and connect the Poisson generator for external input
'''
noise = nest.Create('poisson_generator')
nest.SetStatus(noise, {"rate": self.bg_rate})
nest.Connect(noise, self.nodes_e, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
nest.Connect(noise, self.nodes_i, 'all_to_all',
{'weight': self.psc_ext, 'delay': 1.0})
'''
In order to save the amount of average calcium concentration in each
population through time we create the function record_ca. Here we use the
GetStatus function to retrieve the value of Ca for every neuron in the
network and then store the average.
'''
def record_ca(self):
        ca_e = nest.GetStatus(self.nodes_e, 'Ca')  # Calcium concentration
self.mean_ca_e.append(numpy.mean(ca_e))
        ca_i = nest.GetStatus(self.nodes_i, 'Ca')  # Calcium concentration
self.mean_ca_i.append(numpy.mean(ca_i))
'''
In order to save the state of the connectivity in the network through time
we create the function record_connectivity. Here we use the GetStatus
function to retrieve the number of connected pre synaptic elements of each
neuron. The total amount of excitatory connections is equal to the total
amount of connected excitatory pre synaptic elements. The same applies for
inhibitory connections.
'''
def record_connectivity(self):
syn_elems_e = nest.GetStatus(self.nodes_e, 'synaptic_elements')
syn_elems_i = nest.GetStatus(self.nodes_i, 'synaptic_elements')
self.total_connections_e.append(sum(neuron['Axon_ex']['z_connected']
for neuron in syn_elems_e))
self.total_connections_i.append(sum(neuron['Axon_in']['z_connected']
for neuron in syn_elems_i))
'''
We define a function to plot the recorded values
at the end of the simulation.
'''
def plot_data(self):
fig, ax1 = pl.subplots()
ax1.axhline(self.growth_curve_e_e['eps'],
linewidth=4.0, color='#9999FF')
ax1.plot(self.mean_ca_e, 'b',
label='Ca Concentration Excitatory Neurons', linewidth=2.0)
ax1.axhline(self.growth_curve_i_e['eps'],
linewidth=4.0, color='#FF9999')
ax1.plot(self.mean_ca_i, 'r',
label='Ca Concentration Inhibitory Neurons', linewidth=2.0)
ax1.set_ylim([0, 0.275])
ax1.set_xlabel("Time in [s]")
ax1.set_ylabel("Ca concentration")
ax2 = ax1.twinx()
ax2.plot(self.total_connections_e, 'm',
label='Excitatory connections', linewidth=2.0, linestyle='--')
ax2.plot(self.total_connections_i, 'k',
label='Inhibitory connections', linewidth=2.0, linestyle='--')
ax2.set_ylim([0, 2500])
ax2.set_ylabel("Connections")
ax1.legend(loc=1)
ax2.legend(loc=4)
pl.savefig('StructuralPlasticityExample.eps', format='eps')
'''
It is time to specify how we want to perform the simulation. In this
function we first enable structural plasticity in the network and then we
simulate in steps. On each step we record the calcium concentration and the
connectivity. At the end of the simulation, the plot of connections and
calcium concentration through time is generated.
'''
def simulate(self):
if nest.NumProcesses() > 1:
sys.exit("For simplicity, this example only works " +
"for a single process.")
nest.EnableStructuralPlasticity()
print("Starting simulation")
sim_steps = numpy.arange(0, self.t_sim, self.record_interval)
for i, step in enumerate(sim_steps):
nest.Simulate(self.record_interval)
self.record_ca()
self.record_connectivity()
if i % 20 == 0:
print("Progress: " + str(i / 2) + "%")
print("Simulation finished successfully")
'''
Finally we take all the functions that we have defined and create the sequence
for our example. We prepare the simulation, create the nodes for the network,
connect the external input and then simulate. Please note that as we are
simulating 200 biological seconds in this example, it will take a few minutes
to complete.
'''
if __name__ == '__main__':
    example = StructuralPlasticityExample()
# Prepare simulation
example.prepare_simulation()
example.create_nodes()
example.connect_external_input()
# Start simulation
example.simulate()
example.plot_data()
| gpl-2.0 |
ctn-waterloo/nengo_theano | nengo_theano/test/test_neuron_neuron.py | 1 | 2564 | """This is a test file to test the neuron -> neuron connections through the
transform parameter in net.connect.
Tests
1. 2 populations same size
2. 2 populations different sizes
3. connecting network arrays
"""
import numpy as np
import matplotlib.pyplot as plt
import nengo_theano as nef
neurons = 100
dimensions = 1
array_size = 3
net = nef.Network('WeightMatrix Test')
net.make_input('in1', 1, zero_after_time=2.5)
net.make_input('in2', [1, .5, -.5])
net.make('A', neurons=neurons, dimensions=dimensions, intercept=(.1, 1))
net.make('B', neurons=neurons, dimensions=dimensions) # for test 1
'''net.make('B2', neurons=neurons, dimensions=dimensions, array_size=array_size) # for test 2
net.make('B3', neurons=neurons, dimensions=dimensions, array_size=array_size) # for test 3
net.make('B4', neurons=neurons, dimensions=dimensions, array_size=array_size) # for test 4'''
# setup inhibitory scaling matrix
weight_matrix = [[np.random.rand()*.002 - .001] for i in range(dimensions)] * neurons # for test 1 and 2
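# Note: the list multiplication above produces dimensions * neurons single-element
# rows, each holding a weight drawn from [-0.001, 0.001); with dimensions=1 every
# row repeats the same random value.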
# define our weight matrices and connect up!
net.connect('in1', 'A')
net.connect_neurons('A', 'B', weight_matrix=weight_matrix, pstc=.1) # for test 1
'''net.connect_neurons('A', 'B2', weight_matrix=weight_matrix) # for test 2
net.connect_neurons('A', 'B3', weight_matrix=weight_matrix) # for test 3
net.connect_neurons('A', 'B4', weight_matrix=weight_matrix) # for test 4'''
timesteps = 500
dt_step = 0.01
t = np.linspace(dt_step, timesteps*dt_step, timesteps)
pstc = 0.01
Ip = net.make_probe('in1', dt_sample=dt_step, pstc=pstc)
I2p = net.make_probe('in2', dt_sample=dt_step, pstc=pstc)
Ap = net.make_probe('A', dt_sample=dt_step, pstc=pstc)
Bp = net.make_probe('B', dt_sample=dt_step, pstc=pstc)
'''B2p = net.make_probe('B2', dt_sample=dt_step, pstc=pstc)
B3p = net.make_probe('B3', dt_sample=dt_step, pstc=pstc)
B4p = net.make_probe('B4', dt_sample=dt_step, pstc=pstc)'''
print "starting simulation"
net.run(timesteps*dt_step)
plt.ioff(); plt.close();
plt.subplot(711); plt.title('Input1')
plt.plot(Ip.get_data());
plt.subplot(712); plt.title('Input2')
plt.plot(I2p.get_data());
plt.subplot(713); plt.title('A = In1')
plt.plot(Ap.get_data())
plt.subplot(714); plt.title('B = In2(0) inhib by A')
plt.plot(Bp.get_data())
'''plt.subplot(715); plt.title('B2 = In2, network array full inhib by A')
plt.plot(B2p.get_data())
plt.subplot(716); plt.title('B3 = In2, B3[2] inhib by A')
plt.plot(B3p.get_data())
plt.subplot(717); plt.title('B4 = In2, B3[2] inhib by A')
plt.plot(B4p.get_data())'''
plt.tight_layout()
plt.show()
| mit |
imaculate/scikit-learn | sklearn/tests/test_base.py | 5 | 7693 | # Author: Gael Varoquaux
# License: BSD 3 clause
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
PY26 = sys.version_info[:2] == (2, 6)
if PY26:
# sp.dok_matrix can not be deepcopied in Python 2.6
sparse_matrix_classes.remove(sp.dok_matrix)
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
ehocchen/trading-with-python | lib/yahooFinance.py | 76 | 8290 | # -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <[email protected]>
# License: BSD
"""
Toolset for working with Yahoo Finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
    Will get an OHLCV data frame if a single symbol is provided.
If many symbols are provided, it will return a wide panel
Parameters
------------
    symbols: Yahoo Finance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
    symbol: Yahoo Finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
    adjust historical data based on the adj_close field
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/examples/l1_demo/short_demo.py | 33 | 3737 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]
### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression
logit_res = logit_mod.fit()
## Regularized regression
# Set the regularization parameter to something reasonable
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha)
## Print results
print("============ Results for Logit =================")
print("ML results")
print(logit_res.summary())
print("l1 results")
print(logit_l1_res.summary())
print(logit_l1_cvxopt_res.summary())
### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
## Set the regularization parameter.
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print(mlogit_l1_res.params)
#mlogit_l1_res = mlogit_mod.fit_regularized(
# method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
#print mlogit_l1_res.params
## Print results
print("============ Results for MNLogit =================")
print("ML results")
print(mlogit_res.summary())
print("l1 results")
print(mlogit_l1_res.summary())
#
#
#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
## Fit
N = 50 # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K)) # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)
## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
QC_verbose=True, acc=1e-15)
coeff[n,:] = logit_res.params
## Plot
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in range(K):
plt.plot(alphas, coeff[:,i], label='X'+str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
tomwallis/PsyUtils | tests/misc_tests.py | 1 | 2087 | # tests for miscellaneous functions.
import numpy as np
#from skimage import img_as_float, img_as_ubyte, img_as_uint, io
import psyutils as pu
from nose.tools import *
#import os
from pandas.util.testing import assert_frame_equal
import pandas as pd
def test_pix_per_deg():
res = pu.misc.pix_per_deg(60, (1920, 1080), (52, 29))
desired = 40.361110682401332
np.testing.assert_allclose(res, desired)
def test_pix_per_deg_2():
res = pu.misc.pix_per_deg(60, (1920, 1080), (52, 29), average_wh=False)
desired = np.array([40.97539747, 39.7468239])
np.testing.assert_allclose(res, desired)
def test_expand_grid():
df = pu.misc.expand_grid({'height': [60, 70],
'weight': [100, 140, 180]})
desired = pd.DataFrame({'height': [60, 60, 60, 70, 70, 70],
'weight': [100, 140, 180, 100, 140, 180]})
# Because dicts don't follow a consistent order (specified at runtime to
# reduce memory requirements), we need to sort these and their indices
# to ensure the assert will pass consistently.
# same column order
df.sort_index(axis=1, inplace=True)
desired.sort_index(axis=1, inplace=True)
# same row order:
df.sort_values(by='height', inplace=True)
desired.sort_values(by='height', inplace=True)
df.reset_index(drop=True, inplace=True)
desired.reset_index(drop=True, inplace=True)
# test
assert_frame_equal(df, desired, check_names=False)
# doesn't yet work in python 3.3 run_tests.sh for some reason...
# def test_expand_grid():
# from collections import OrderedDict
# entries = OrderedDict([('height', [60, 70]),
# ('weight', [100, 140, 180])])
# df = pu.misc.expand_grid(entries)
# print(df)
# desired = pd.DataFrame({'height': [60, 60, 60, 70, 70, 70],
# 'weight': [100, 140, 180, 100, 140, 180]})
# print(desired)
# try:
# assert_frame_equal(df, desired, check_names=False)
# return True
# except AssertionError:
# return False
| mit |
rahul-c1/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squares function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
achim1/pyevsel | HErmes/selection/variables.py | 2 | 26753 | """
Container classes for variables
"""
import numpy as np
import os
import pandas as pd
import tables
import abc
import enum
import array
import numbers
from ..utils import files as f
from ..utils import Logger
from copy import deepcopy as copy
from collections import Sequence
from itertools import chain, count
DEFAULT_BINS = 70
REGISTERED_FILEEXTENSIONS = [".h5"]
try:
import uproot as ur
import uproot_methods.classes.TVector3 as TVector3
import uproot_methods.classes.TLorentzVector as TLorentzVector
import uproot_methods.classes.TH1
REGISTERED_FILEEXTENSIONS.append(".root")
except ImportError:
Logger.warning("No uproot found, root support is limited!")
# helper
def _depth(seq):
"""
    Infer the depth of a nested sequence.
"""
for level in count(1):
#print (seq)
if not hasattr(seq,"__iter__"):
return level
else:
if not hasattr(seq[0],"__iter__"):
return level + 1
else:
if len(seq[0]) == 0:
return level + 1
seq = seq[0]
#seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))
if level > 5:
raise ValueError("This data has a nesting level > 5. This seems at the edge of being useful")
################################################################
# define a non-member function so that it can be used in a
# multiprocessing approach
def extract_from_root(filename, definitions,
nevents=None,
dtype=np.float64,
transform = None,
reduce_dimension=None):
"""
Use the uproot system to get information from rootfiles. Supports a basic tree of
primitive datatype like structure.
Args:
filename (str): datafile
        definitions (list): tree and branch addresses
Keyword Args:
nevents (int): number of events to read out
reduce_dimension (int): If data is vector-type, reduce it by taking the n-th element
        dtype (np.dtype): A numpy datatype, default double (np.float64) - use smaller dtypes to
save memory
        transform (func): A function which directly transforms the readout data
"""
can_be_concatted = False
rootfile = ur.open(filename)
success = False
i=0
branch = None
# it will most likely work only with TTrees
while not success:
try:
tree = rootfile.get(definitions[i][0])
branch = tree
for address in definitions[i][1:]:
Logger.debug("Searching for address {}".format(address))
branch = branch.get(address)
#tree = file[definitions[i][0]]
#branch = rootfile[definitions[i][0]].get(definitions[i][1])
success = True
except KeyError as e:
Logger.warning(f"Can not find address {definitions[i]}")
i+=1
except IndexError:
Logger.critical(f"None of the provided keys could be found {definitions}")
break
Logger.debug(f"Found valid definitions {definitions[i]}")
##FiXME make logger.critical end program!
if nevents is not None:
data = branch.array(entrystop=nevents)
else:
data = branch.array()
# check for dimensionality
multidim = False
try:
len(data[0])
multidim = True
except TypeError:
Logger.debug(f"Assuming scalar data {definitions[i]}")
if multidim:
Logger.debug("Inspecting data...")
tmp_lengths = set([len(k) for k in data])
Logger.debug("Found {}".format(tmp_lengths))
firstlen = list(tmp_lengths)[0]
if (len(tmp_lengths) == 1) and (firstlen == 1):
multidim = False
Logger.debug("Found data containing iterables of size 1... flattening!")
del tmp_lengths
if dtype != np.float64:
tmpdata = array.array("f",[])
else:
tmpdata = array.array("d",[])
if isinstance(data[0][0], numbers.Number):
[tmpdata.append(dtype(k)) for k in data]
#tmpdata = np.asarray([k[0] for k in data])
#multidim = True
data = tmpdata
del tmpdata
else:
Logger.info("Is multidim data")
multidim = True
else:
del tmp_lengths
multidim = True
Logger.debug("Assuming array data {}".format(definitions[i]))
if reduce_dimension is not None:
if not multidim:
raise ValueError("Can not reduce scalar data!")
if isinstance(reduce_dimension, int):
data = np.array([k[reduce_dimension] for k in data], dtype=dtype)
multidim = False
else:
data = [[k[reduce_dimension[1]] for k in j] for j in data]
if multidim:
Logger.debug("Grabbing multidimensional data from root-tree for {}".format(definitions[i]))
del data
if nevents is None:
data = branch.array() #this will be a jagged array now!
else:
data = branch.array(entrystop=nevents)
del branch
if (len(data[0])):
if isinstance(data[0][0], TVector3.TVector3):
Logger.info("Found TVector3 data, treating appropriatly")
data = pd.Series([np.array([i.x,i.y,i.z], dtype=dtype) for i in data])
if isinstance(data[0][0], TLorentzVector.TLorentzVector):
Logger.info("Found TLorentzVector data, treating appropriatly")
data = pd.Series([np.array([i.x,i.y,i.z,i.t], dtype=dtype) for i in data])
else: # probably number then
data = pd.Series([np.asarray(i,dtype=dtype) for i in data])
# the below might not be picklable (multiprocessing!)
#tmpdata = [i for i in data]
# FIXME: dataframe/series
# try to convert this to a pandas dataframe
#data = pd.DataFrame(tmpdata)
can_be_concatted = True
data.multidim = True
else:
Logger.debug("Grabbing scalar data from root-tree for {}".format(definitions[i]))
# convert in cases of TVector3/TLorentzVector
if isinstance(data[0], TVector3.TVector3):
Logger.debug("Found TVector3")
data = pd.Series([np.array([i.x,i.y,i.z], dtype=dtype) for i in data])
elif isinstance(data[0], TLorentzVector.TLorentzVector):
Logger.debug("Found TLorentzVector")
data = pd.Series([np.array([i.x,i.y,i.z, i.t], dtype=dtype) for i in data])
else:
try:
#FIXME: why is that asarray needed?
#data = pd.Series(np.asarray(data,dtype=dtype))
data = pd.Series(data,dtype=dtype)
except TypeError: # data consist of some object
data = pd.Series(data)
Logger.debug("Got {} elements for {}".format(len(data), definitions[i]))
can_be_concatted = True
if transform is not None:
data = transform(data)
return data, can_be_concatted
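# Illustrative sketch of a call to extract_from_root (not part of the original
# module; the file name and tree/branch addresses are hypothetical and uproot
# must be installed):
#
#   data, concattable = extract_from_root(
#       "simulation.root",
#       [("events", "energy")],
#       nevents=1000,
#       dtype=np.float32)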
################################################################
# define a non-member function so that it can be used in a
# multiprocessing approach
def harvest(filenames, definitions, **kwargs):
"""
Read variables from files into memory. Will be used by HErmes.selection.variables.Variable.harvest
This will be run multi-threaded. Keep that in mind, arguments have to be picklable,
also everything thing which is read out must be picklable. Lambda functions are NOT picklable
Args:
filenames (list): the files to extract the variables from.
currently supported: hdf
definitions (list): where to find the data in the files. They usually
have some tree-like structure, so this a list
of leaf-value pairs. If there is more than one
all of them will be tried. (As it might be that
in some files a different naming scheme was used)
Example: [("hello_reoncstruction", "x"), ("hello_reoncstruction", "y")] ]
Keyword Args:
transformation (func): After the data is read out from the files,
transformation will be applied, e.g. the log
to the energy.
fill_empty (bool): Fill empty fields with zeros
nevents (int): ROOT only - read out only nevents from the files
reduce_dimension (str): ROOT only - multidimensional data can be reduced by only
using the index given by reduce_dimension.
                                E.g. in case of a TVector3, and we want to have only
x, that would be 0, y -> 1 and z -> 2.
        dtype (np.dtype) : datatype to cast to (default np.float64); can be used
                           to reduce the memory footprint.
Returns:
pd.Series or pd.DataFrame
"""
nevents = kwargs["nevents"] if "nevents" in kwargs else None
fill_empty = kwargs["fill_empty"] if "fill_empty" in kwargs else False
reduce_dimension = kwargs["reduce_dimension"] if "reduce_dimension" in kwargs else None
transform = kwargs["transformation"] if "transformation" in kwargs else None
dtype = kwargs["dtype"] if "dtype" in kwargs else np.float64
concattable = True
data = pd.Series(dtype=dtype)
#multidim_data = pd.DataFrame()
for filename in filenames:
filetype = f.strip_all_endings(filename)[1]
assert filetype in REGISTERED_FILEEXTENSIONS, "Filetype {} not known!".format(filetype)
        assert os.path.exists(filename), "File {} does not exist!".format(filename)
if (filetype == ".h5") and (transform is not None):
Logger.critical("Can not apply direct transformation for h5 files (yet). This is only important for root files and varaibles which are used as VariableRole.PARAMETER")
Logger.debug("Attempting to harvest {1} file {0}".format(filename,filetype))
if filetype == ".h5" and not isinstance(filename, tables.table.Table):
# store = pd.HDFStore(filename)
hdftable = tables.open_file(filename)
else:
hdftable = filename
tmpdata = pd.Series(dtype=dtype)
for definition in definitions:
definition = list(definition)
if filetype == ".h5":
if not definition[0].startswith("/"):
definition[0] = "/" + definition[0]
try:
# data = store.select_column(*definition)
if not definition[1]:
tmpdata = hdftable.get_node(definition[0])
else:
tmpdata = hdftable.get_node(definition[0]).col(definition[1])
if tmpdata.ndim == 2:
if data.empty:
data = pd.DataFrame()
tmpdata = pd.DataFrame(tmpdata, dtype=dtype)
else:
tmpdata = pd.Series(tmpdata, dtype=dtype)
Logger.debug("Found {} entries in table for {}{}".format(len(tmpdata),definition[0],definition[1]))
break
except tables.NoSuchNodeError:
Logger.debug("Can not find definition {0} in {1}! ".format(definition, filename))
continue
elif filetype == ".root":
tmpdata, concattable = extract_from_root(filename, definitions,
nevents=nevents,
dtype=dtype,
transform=transform,
reduce_dimension=reduce_dimension)
if filetype == ".h5":
hdftable.close()
#tmpdata = harvest_single_file(filename, filetype,definitions)
# self.data = self.data.append(data.map(self.transform))
# concat should be much faster
if not True in [isinstance(tmpdata, k) for k in [pd.Series, pd.DataFrame] ]:
concattable = False
if not concattable:
Logger.warning(f"Data {definitions} can not be concatted, keep that in mind!")
try:
tmpdata = pd.Series(tmpdata)
#return tmpdata
except:
tmpdata = [k for k in tmpdata]
tmpdata = pd.Series(tmpdata)
#return tmpdata
data = pd.concat([data, tmpdata])
del tmpdata
return data
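# Illustrative sketch of a harvest call (not part of the original module; the
# file names and table/column definitions are hypothetical):
#
#   energies = harvest(["run1.h5", "run2.h5"],
#                      [("reco", "energy"), ("reco_v2", "energy")])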
################################################################
def freedman_diaconis_bins(data,leftedge,\
rightedge,minbins=20,\
maxbins=70,fallbackbins=DEFAULT_BINS):
"""
Get a number of bins for a histogram
following Freedman/Diaconis
Args:
leftedge (float): left bin edge
rightedge (float): right bin edge
minbins (int): the minimum number of bins
maxbins (int): the maximum number of bins
fallbackbins (int): a number of bins which is returned
if calculation failse
Returns:
nbins (int): number of bins, minbins < bins < maxbins
"""
try:
finite_data = np.isfinite(data)
q3 = np.percentile(data[finite_data],75)
q1 = np.percentile(data[finite_data],25)
n_data = len(data)
if q3 == q1:
Logger.warning("Can not calculate bins, falling back... to min max approach")
q3 = max(finite_data)
q1 = min(finite_data)
h = (2*(q3-q1))/(n_data**1./3)
bins = (rightedge - leftedge)/h
if not np.isfinite(bins):
Logger.info(f"Got some nan somewhere: q1 : {q1}, q3 : {q3}, n_data : {n_data}, h : {h}")
Logger.warning("Calculate Freedman-Draconis bins failed, calculated nan bins, returning fallback")
bins = fallbackbins
if bins < minbins:
bins = minbins
if bins > maxbins:
bins = maxbins
except Exception as e:
Logger.warning(f"Calculate Freedman-Draconis bins failed {e}")
bins = fallbackbins
return bins
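# Minimal usage sketch (added for clarity, not part of the original module):
#
#   example = np.random.normal(size=1000)
#   nbins = freedman_diaconis_bins(example, example.min(), example.max())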
#############################################################
class VariableRole(enum.Enum):
"""
Define roles for variables. Some variables used in a special context (like weights)
are easily recognizable by this flag.
"""
UNKNOWN = 0
SCALAR = 10
ARRAY = 20
GENERATORWEIGHT = 30
RUNID = 40
EVENTID = 50
STARTIME = 60
ENDTIME = 70
FLUXWEIGHT = 80
PARAMETER = 90 # a single parameter, no array whatsoever
##############################################################
class AbstractBaseVariable(metaclass=abc.ABCMeta):
"""
Read out tagged numerical data from files
"""
_harvested = False
_bins = None
ROLES = VariableRole
def __hash__(self):
return hash(self.name)
def __repr__(self):
return """<Variable: {}>""".format(self.name)
def __eq__(self,other):
return self.name == other.name
def __lt__(self, other):
return sorted([self.name,other.name])[0] == self.name
def __gt__(self, other):
return sorted([self.name,other.name])[1] == self.name
def __le__(self, other):
return self < other or self == other
def __ge__(self, other):
return self > other or self == other
def declare_harvested(self):
self._harvested = True
@property
def harvested(self):
return self._harvested
@property
def bins(self):
if self._bins is None:
return self.calculate_fd_bins()
else:
return self._bins
@bins.setter
def bins(self, value):
self._bins = value
def calculate_fd_bins(self, cutmask=None):
"""
Calculate a reasonable binning
Keyword Args:
cutmask (numpy.ndarray) : a boolean mask to cut on, in case
cuts have been applied to the
category this data is part of
Returns:
numpy.ndarray: Freedman Diaconis bins
"""
tmpdata = self.data
if cutmask is not None:
if len(cutmask) > 0:
tmpdata = tmpdata[cutmask]
try:
min(tmpdata)
except Exception as e:
Logger.warning(f"Can not infere minimum of {tmpdata}. Fall back to DEFAULT_BINS. This is a bug!")
return DEFAULT_BINS
nbins = freedman_diaconis_bins(tmpdata, min(tmpdata), max(tmpdata))
bins = np.linspace(min(tmpdata),max(tmpdata), nbins)
return bins
def harvest(self, *files):
"""
Hook to the harvest method. Don't use in case of multiprocessing!
Args:
*files: walk through these files and readout
"""
if self.role == VariableRole.PARAMETER:
self._data = harvest(files, self.definitions, transformation= self.transform)
self._data = self._data[0]
else:
self._data = harvest(files, self.definitions)
self.declare_harvested()
@abc.abstractmethod
def rewire_variables(self, vardict):
return
@property
def ndim(self):
"""
Infer the nesting depth of the data
"""
if hasattr(self._data, "multidim"):
if self._data.multidim == True:
return 2
if self._data.ndim == 1:
# check again
level = _depth(self._data)
if level != self._data.ndim:
Logger.warning(f"Discrepancy in dimensionality found {level} VS {self._data.ndmin}")
return level
return self._data.ndim
@property
def data(self):
if isinstance(self._data, pd.DataFrame):
#return self._data.as_matrix()
#FIXME: as_matrix is depracted in favor of values
return self._data.values
if not hasattr(self._data, "shape"):
Logger.warning("Something's wrong, this should be array data!")
Logger.warning(f"Seeeing {type(self._data)} data")
Logger.warning("Attempting to fix!")
self._data = np.asarray(self._data)
return self._data
############################################
class Variable(AbstractBaseVariable):
"""
A hook to a single variable read out from a file
"""
def __init__(self, name, definitions=None,\
bins=None, label="", transform=None,
role=VariableRole.SCALAR,
nevents=None,
reduce_dimension=None):
"""
Args:
name (str) : An unique identifier
Keyword Args:
definitions (list) : table and/or column names in underlying data
bins (numpy.ndarray) : used for histograms
label (str) : used for plotting and as a label in tables
transform (func) : apply to each member of the underlying data at readout
role (HErmes.selection.variables.VariableRole) : The role the variable is playing.
In most cases the default is the best choice
nevents (int) : number of events to read in (ROOT only right now!)
reduce_dimension (int) : in case of multidimensionality,
take only the the given index of the array (ROOT only right now)
"""
AbstractBaseVariable.__init__(self)
if definitions is not None:
#assert not (False in [len(x) <= 2 for x in definitions]), "Can not understand variable definitions {}!".format(definitions)
self.defsize = len(definitions[0])
#FIXME : not sure how important this is right now
#assert not (False in [len(x) == self.defsize for x in definitions]), "All definitions must have the same length!"
else:
self.defsize = 0
self.name = name
self.role = role
self.bins = bins # when histogrammed
self.label = label
self.transform = transform
self.definitions = definitions
self._data = pd.Series(dtype=np.float64)
self.nevents = nevents
self.reduce_dimension = reduce_dimension
self._role = role
#if self.defsize == 1:
# self.data = pd.DataFrame()
#if self.defsize == 2:
# self.data = pd.Series()
def rewire_variables(self, vardict):
"""
Make sure all the variables are connected properly. This is
only needed for combined/compound variables
Returns:
None
"""
pass
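# Illustrative sketch of defining and harvesting a Variable (not part of the
# original module; the hdf5 file, table and column names are hypothetical):
#
#   energy = Variable("energy",
#                     definitions=[("reco", "energy")],
#                     label="E [GeV]")
#   energy.harvest("run1.h5", "run2.h5")
#   values = energy.data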
##########################################################
class CompoundVariable(AbstractBaseVariable):
"""
Calculate a variable from other variables. This kind of variable will not read any file.
"""
def __init__(self, name, variables=None, label="",\
bins=None, operation=lambda x,y : x + y,
role=VariableRole.SCALAR,
dtype=np.float64):
"""
A compound variable is a variable which is created from two or more other variables. This variable does not have
a direct representation in a file, but gets calculated on the fly instead, e.g. a residual of two other variables
The 'operation' denoted function here defines what operator should be applied to the variables to create the new
coumpound variable
Args:
name (str) : An unique identifier for the new variable.
Keyword Args:
variables (list) : A list of variables used to calculate the new variable.
label (str) : A label for plotting.
bins (np.ndarray) : binning for distributions.
operation (fnc) : The operation which will be applied to variables.
role (HErmes.selection.variables.VariableRole) : The role the variable is playing.
In most cases the default is the best choice. Assigning roles
to variables allows for special magic, e.g. in the case
of weighting
"""
AbstractBaseVariable.__init__(self)
self.name = name
self.role = role
self.label = label
self.bins = bins
if variables is None:
variables = []
self.variables = variables
self.operation = operation
self._data = pd.Series(dtype=np.float64) #dtype to suppress warning
self.definitions = ((self.__repr__()),)
def rewire_variables(self, vardict):
"""
        Used to avoid the necessity of reading out variables twice:
        as the variables are copied over by the categories, the
        reference is lost, but it can be rewired.
"""
newvars = []
for var in self.variables:
newvars.append(vardict[var.name])
self.variables = newvars
def __repr__(self):
return """<CompoundVariable {} created from: {}>""".format(self.name,"".join([x.name for x in self.variables ]))
def harvest(self, *filenames):
#FIXME: filenames is not used, just
#there for compatibility
if self.harvested:
return
harvested = [var for var in self.variables if var.harvested]
if not len(harvested) == len(self.variables):
Logger.error("Variables have to be harvested for compound variable {0} first!".format(self.variables))
Logger.error("Only {} is harvested".format(harvested))
return
self._data = self.operation(*[var.data for var in self.variables])
self.declare_harvested()
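# Illustrative sketch of a CompoundVariable (not part of the original module;
# var_x and var_y stand for two already harvested Variable instances):
#
#   import operator
#   residual = CompoundVariable("residual",
#                               variables=[var_x, var_y],
#                               operation=operator.sub)
#   residual.harvest()   # fills residual.data with var_x.data - var_y.data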
##########################################################
class VariableList(AbstractBaseVariable):
"""
A list of variable. Can not be read out from files.
"""
def __init__(self, name, variables=None, label="", bins=None, role=VariableRole.SCALAR):
"""
Args:
name (str): An unique identifier for the new category.
Keyword Args:
variables (list): A list of variables used to calculate the new variable.
label (str): A label for plotting.
bins (np.ndarray): binning for distributions.
role (HErmes.selection.variables.VariableRole): The role the variable is playing. In most cases the default is the best choice
"""
AbstractBaseVariable.__init__(self)
self.name = name
self.label = label
self.bins = bins
if variables is None:
variables = []
self.variables = variables
def harvest(self, *filenames):
#FIXME: filenames is not used, just
#there for compatibility
# do not calculate weights yet!
if self.harvested:
return
harvested = [var for var in self.variables if var.harvested]
if not len(harvested) == len(self.variables):
Logger.error("Variables have to be harvested for compound variable {} first!".format(self.name))
return
self.declare_harvested()
def rewire_variables(self, vardict):
"""
        Used to avoid the necessity of reading out variables twice:
        as the variables are copied over by the categories, the
        reference is lost, but it can be rewired.
"""
newvars = []
for var in self.variables:
newvars.append(vardict[var.name])
self.variables = newvars
@property
def data(self):
return [x.data for x in self.variables]
| gpl-2.0 |
ibis-project/ibis | ibis/backends/hdf5/__init__.py | 1 | 2068 | import pandas as pd
import ibis.expr.operations as ops
import ibis.expr.schema as sch
from ibis.backends.base import BaseBackend
from ibis.backends.base.file import FileClient
from ibis.backends.pandas.core import execute, execute_node
class HDFTable(ops.DatabaseTable):
pass
class HDFClient(FileClient):
def insert(
self, path, key, expr, format='table', data_columns=True, **kwargs
):
path = self.root / path
data = execute(expr)
data.to_hdf(
str(path), key, format=format, data_columns=data_columns, **kwargs
)
def table(self, name, path):
if name not in self.list_tables(path):
raise AttributeError(name)
# get the schema
with pd.HDFStore(str(path), mode='r') as store:
df = store.select(name, start=0, stop=0)
schema = sch.infer(df)
t = self.table_class(name, schema, self).to_expr()
self.dictionary[name] = path
return t
def list_tables(self, path=None):
# tables are individual tables within a file
if path is None:
path = self.root
if path.is_file() and str(path).endswith(self.extension):
with pd.HDFStore(str(path), mode='r') as store:
# strip leading /
return [k[1:] for k in store.keys()]
return []
def list_databases(self, path=None):
return self._list_databases_dirs_or_files(path)
class Backend(BaseBackend):
name = 'hdf5'
kind = 'pandas'
extension = 'h5'
table_class = HDFTable
def connect(self, path):
"""Create a HDF5Client for use with Ibis
Parameters
----------
path: str or pathlib.Path
Returns
-------
HDF5Client
"""
return HDFClient(backend=self, root=path)
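# Illustrative sketch (not part of the original module; the directory path is
# hypothetical and this assumes the surrounding ibis machinery is importable):
#
#   client = Backend().connect('/path/to/hdf5_root')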
@execute_node.register(Backend.table_class, HDFClient)
def hdf_read_table(op, client, scope, **kwargs):
key = op.name
path = client.dictionary[key]
df = pd.read_hdf(str(path), key, mode='r')
return df
| apache-2.0 |
bnaul/scikit-learn | sklearn/inspection/tests/test_partial_dependence.py | 4 | 28045 | """
Testing for the partial dependence module.
"""
import numpy as np
import pytest
import sklearn
from sklearn.inspection import partial_dependence
from sklearn.inspection._partial_dependence import (
_grid_from_X,
_partial_dependence_brute,
_partial_dependence_recursion
)
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import MultiTaskLasso
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import load_iris
from sklearn.datasets import make_classification, make_regression
from sklearn.cluster import KMeans
from sklearn.compose import make_column_transformer
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import make_pipeline
from sklearn.dummy import DummyClassifier
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.exceptions import NotFittedError
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_equal
from sklearn.utils import _IS_32BIT
from sklearn.utils.validation import check_random_state
from sklearn.tree.tests.test_tree import assert_is_subtree
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# (X, y), n_targets <-- as expected in the output of partial_dep()
binary_classification_data = (make_classification(n_samples=50,
random_state=0), 1)
multiclass_classification_data = (make_classification(n_samples=50,
n_classes=3,
n_clusters_per_class=1,
random_state=0), 3)
regression_data = (make_regression(n_samples=50, random_state=0), 1)
multioutput_regression_data = (make_regression(n_samples=50, n_targets=2,
random_state=0), 2)
# iris
iris = load_iris()
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize('Estimator, method, data', [
(GradientBoostingClassifier, 'auto', binary_classification_data),
(GradientBoostingClassifier, 'auto', multiclass_classification_data),
(GradientBoostingClassifier, 'brute', binary_classification_data),
(GradientBoostingClassifier, 'brute', multiclass_classification_data),
(GradientBoostingRegressor, 'auto', regression_data),
(GradientBoostingRegressor, 'brute', regression_data),
(DecisionTreeRegressor, 'brute', regression_data),
(LinearRegression, 'brute', regression_data),
(LinearRegression, 'brute', multioutput_regression_data),
(LogisticRegression, 'brute', binary_classification_data),
(LogisticRegression, 'brute', multiclass_classification_data),
(MultiTaskLasso, 'brute', multioutput_regression_data),
])
@pytest.mark.parametrize('grid_resolution', (5, 10))
@pytest.mark.parametrize('features', ([1], [1, 2]))
@pytest.mark.parametrize('kind', ('legacy', 'average', 'individual', 'both'))
def test_output_shape(Estimator, method, data, grid_resolution,
features, kind):
# Check that partial_dependence has consistent output shape for different
# kinds of estimators:
# - classifiers with binary and multiclass settings
# - regressors
# - multi-task regressors
est = Estimator()
# n_target corresponds to the number of classes (1 for binary classif) or
# the number of tasks / outputs in multi task settings. It's equal to 1 for
# classical regression_data.
(X, y), n_targets = data
n_instances = X.shape[0]
est.fit(X, y)
result = partial_dependence(
est, X=X, features=features, method=method, kind=kind,
grid_resolution=grid_resolution
)
# FIXME: to be removed in 0.24
pdp, axes = result if kind == 'legacy' else (result, result["values"])
expected_pdp_shape = (n_targets,
*[grid_resolution for _ in range(len(features))])
expected_ice_shape = (n_targets, n_instances,
*[grid_resolution for _ in range(len(features))])
if kind == 'legacy':
assert pdp.shape == expected_pdp_shape
elif kind == 'average':
assert pdp.average.shape == expected_pdp_shape
elif kind == 'individual':
assert pdp.individual.shape == expected_ice_shape
else: # 'both'
assert pdp.average.shape == expected_pdp_shape
assert pdp.individual.shape == expected_ice_shape
expected_axes_shape = (len(features), grid_resolution)
assert axes is not None
assert np.asarray(axes).shape == expected_axes_shape
def test_grid_from_X():
# tests for _grid_from_X: sanity check for output, and for shapes.
# Make sure that the grid is a cartesian product of the input (it will use
# the unique values instead of the percentiles)
percentiles = (.05, .95)
grid_resolution = 100
X = np.asarray([[1, 2],
[3, 4]])
grid, axes = _grid_from_X(X, percentiles, grid_resolution)
assert_array_equal(grid, [[1, 2],
[1, 4],
[3, 2],
[3, 4]])
assert_array_equal(axes, X.T)
# test shapes of returned objects depending on the number of unique values
# for a feature.
rng = np.random.RandomState(0)
grid_resolution = 15
# n_unique_values > grid_resolution
X = rng.normal(size=(20, 2))
grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)
assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])
assert np.asarray(axes).shape == (2, grid_resolution)
# n_unique_values < grid_resolution, will use actual values
n_unique_values = 12
X[n_unique_values - 1:, 0] = 12345
rng.shuffle(X) # just to make sure the order is irrelevant
grid, axes = _grid_from_X(X, percentiles, grid_resolution=grid_resolution)
assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])
# axes is a list of arrays of different shapes
assert axes[0].shape == (n_unique_values,)
assert axes[1].shape == (grid_resolution,)
@pytest.mark.parametrize(
"grid_resolution, percentiles, err_msg",
[(2, (0, 0.0001), "percentiles are too close"),
(100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"),
(100, 12345, "'percentiles' must be a sequence of 2 elements"),
(100, (-1, .95), r"'percentiles' values must be in \[0, 1\]"),
(100, (.05, 2), r"'percentiles' values must be in \[0, 1\]"),
(100, (.9, .1), r"percentiles\[0\] must be strictly less than"),
(1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1")]
)
def test_grid_from_X_error(grid_resolution, percentiles, err_msg):
X = np.asarray([[1, 2], [3, 4]])
with pytest.raises(ValueError, match=err_msg):
_grid_from_X(
X, grid_resolution=grid_resolution, percentiles=percentiles
)
@pytest.mark.parametrize('target_feature', range(5))
@pytest.mark.parametrize('est, method', [
(LinearRegression(), 'brute'),
(GradientBoostingRegressor(random_state=0), 'brute'),
(GradientBoostingRegressor(random_state=0), 'recursion'),
(HistGradientBoostingRegressor(random_state=0), 'brute'),
(HistGradientBoostingRegressor(random_state=0), 'recursion')]
)
def test_partial_dependence_helpers(est, method, target_feature):
# Check that what is returned by _partial_dependence_brute or
# _partial_dependence_recursion is equivalent to manually setting a target
# feature to a given value, and computing the average prediction over all
# samples.
# This also checks that the brute and recursion methods give the same
# output.
# Note that even on the trainset, the brute and the recursion methods
# aren't always strictly equivalent, in particular when the slow method
# generates unrealistic samples that have low mass in the joint
# distribution of the input features, and when some of the features are
# dependent. Hence the high tolerance on the checks.
X, y = make_regression(random_state=0, n_features=5, n_informative=5)
# The 'init' estimator for GBDT (here the average prediction) isn't taken
# into account with the recursion method, for technical reasons. We set
    # the mean to 0 so that this 'bug' doesn't have any effect.
y = y - y.mean()
est.fit(X, y)
# target feature will be set to .5 and then to 123
features = np.array([target_feature], dtype=np.int32)
grid = np.array([[.5],
[123]])
if method == 'brute':
pdp, predictions = _partial_dependence_brute(est, grid, features, X,
response_method='auto')
else:
pdp = _partial_dependence_recursion(est, grid, features)
mean_predictions = []
for val in (.5, 123):
X_ = X.copy()
X_[:, target_feature] = val
mean_predictions.append(est.predict(X_).mean())
pdp = pdp[0] # (shape is (1, 2) so make it (2,))
# allow for greater margin for error with recursion method
rtol = 1e-1 if method == 'recursion' else 1e-3
assert np.allclose(pdp, mean_predictions, rtol=rtol)
@pytest.mark.parametrize('seed', range(1))
def test_recursion_decision_tree_vs_forest_and_gbdt(seed):
# Make sure that the recursion method gives the same results on a
# DecisionTreeRegressor and a GradientBoostingRegressor or a
# RandomForestRegressor with 1 tree and equivalent parameters.
rng = np.random.RandomState(seed)
# Purely random dataset to avoid correlated features
n_samples = 1000
n_features = 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples) * 10
# The 'init' estimator for GBDT (here the average prediction) isn't taken
# into account with the recursion method, for technical reasons. We set
    # the mean to 0 so that this 'bug' doesn't have any effect.
y = y - y.mean()
# set max_depth not too high to avoid splits with same gain but different
# features
max_depth = 5
tree_seed = 0
forest = RandomForestRegressor(n_estimators=1, max_features=None,
bootstrap=False, max_depth=max_depth,
random_state=tree_seed)
# The forest will use ensemble.base._set_random_states to set the
# random_state of the tree sub-estimator. We simulate this here to have
# equivalent estimators.
equiv_random_state = check_random_state(tree_seed).randint(
np.iinfo(np.int32).max)
gbdt = GradientBoostingRegressor(n_estimators=1, learning_rate=1,
criterion='mse', max_depth=max_depth,
random_state=equiv_random_state)
tree = DecisionTreeRegressor(max_depth=max_depth,
random_state=equiv_random_state)
forest.fit(X, y)
gbdt.fit(X, y)
tree.fit(X, y)
# sanity check: if the trees aren't the same, the PD values won't be equal
try:
assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)
assert_is_subtree(tree.tree_, forest[0].tree_)
except AssertionError:
# For some reason the trees aren't exactly equal on 32bits, so the PDs
# cannot be equal either. See
# https://github.com/scikit-learn/scikit-learn/issues/8853
assert _IS_32BIT, "this should only fail on 32 bit platforms"
return
grid = rng.randn(50).reshape(-1, 1)
for f in range(n_features):
features = np.array([f], dtype=np.int32)
pdp_forest = _partial_dependence_recursion(forest, grid, features)
pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)
pdp_tree = _partial_dependence_recursion(tree, grid, features)
np.testing.assert_allclose(pdp_gbdt, pdp_tree)
np.testing.assert_allclose(pdp_forest, pdp_tree)
@pytest.mark.parametrize('est', (
GradientBoostingClassifier(random_state=0),
HistGradientBoostingClassifier(random_state=0),
))
@pytest.mark.parametrize('target_feature', (0, 1, 2, 3, 4, 5))
def test_recursion_decision_function(est, target_feature):
# Make sure the recursion method (implicitly uses decision_function) has
# the same result as using brute method with
# response_method=decision_function
X, y = make_classification(n_classes=2, n_clusters_per_class=1,
random_state=1)
assert np.mean(y) == .5 # make sure the init estimator predicts 0 anyway
est.fit(X, y)
preds_1 = partial_dependence(
est, X, [target_feature], response_method='decision_function',
method='recursion', kind='average'
)
preds_2 = partial_dependence(
est, X, [target_feature], response_method='decision_function',
method='brute', kind='average'
)
assert_allclose(preds_1['average'], preds_2['average'], atol=1e-7)
@pytest.mark.parametrize('est', (
LinearRegression(),
GradientBoostingRegressor(random_state=0),
HistGradientBoostingRegressor(random_state=0, min_samples_leaf=1,
max_leaf_nodes=None, max_iter=1),
DecisionTreeRegressor(random_state=0),
))
@pytest.mark.parametrize('power', (1, 2))
def test_partial_dependence_easy_target(est, power):
# If the target y only depends on one feature in an obvious way (linear or
# quadratic) then the partial dependence for that feature should reflect
# it.
    # We here fit a linear regression model (with polynomial features if
# needed) and compute r_squared to check that the partial dependence
# correctly reflects the target.
rng = np.random.RandomState(0)
n_samples = 200
target_variable = 2
X = rng.normal(size=(n_samples, 5))
y = X[:, target_variable]**power
est.fit(X, y)
pdp = partial_dependence(
est, features=[target_variable], X=X, grid_resolution=1000,
kind='average'
)
new_X = pdp["values"][0].reshape(-1, 1)
new_y = pdp['average'][0]
# add polynomial features if needed
new_X = PolynomialFeatures(degree=power).fit_transform(new_X)
lr = LinearRegression().fit(new_X, new_y)
r2 = r2_score(new_y, lr.predict(new_X))
assert r2 > .99
@pytest.mark.parametrize('Estimator',
(sklearn.tree.DecisionTreeClassifier,
sklearn.tree.ExtraTreeClassifier,
sklearn.ensemble.ExtraTreesClassifier,
sklearn.neighbors.KNeighborsClassifier,
sklearn.neighbors.RadiusNeighborsClassifier,
sklearn.ensemble.RandomForestClassifier))
def test_multiclass_multioutput(Estimator):
# Make sure error is raised for multiclass-multioutput classifiers
# make multiclass-multioutput dataset
X, y = make_classification(n_classes=3, n_clusters_per_class=1,
random_state=0)
y = np.array([y, y]).T
est = Estimator()
est.fit(X, y)
with pytest.raises(
ValueError,
match="Multiclass-multioutput estimators are not supported"):
partial_dependence(est, X, [0])
class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
# simulate that we have some classes
self.classes_ = [0, 1]
return self
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize(
"estimator, params, err_msg",
[(KMeans(),
{'features': [0]},
"'estimator' must be a fitted regressor or classifier"),
(LinearRegression(),
{'features': [0], 'response_method': 'predict_proba'},
'The response_method parameter is ignored for regressors'),
(GradientBoostingClassifier(random_state=0),
{'features': [0], 'response_method': 'predict_proba',
'method': 'recursion'},
"'recursion' method, the response_method must be 'decision_function'"),
(GradientBoostingClassifier(random_state=0),
{'features': [0], 'response_method': 'predict_proba', 'method': 'auto'},
"'recursion' method, the response_method must be 'decision_function'"),
(GradientBoostingClassifier(random_state=0),
{'features': [0], 'response_method': 'blahblah'},
'response_method blahblah is invalid. Accepted response_method'),
(NoPredictProbaNoDecisionFunction(),
{'features': [0], 'response_method': 'auto'},
'The estimator has no predict_proba and no decision_function method'),
(NoPredictProbaNoDecisionFunction(),
{'features': [0], 'response_method': 'predict_proba'},
'The estimator has no predict_proba method.'),
(NoPredictProbaNoDecisionFunction(),
{'features': [0], 'response_method': 'decision_function'},
'The estimator has no decision_function method.'),
(LinearRegression(),
{'features': [0], 'method': 'blahblah'},
'blahblah is invalid. Accepted method names are brute, recursion, auto'),
(LinearRegression(),
{'features': [0], 'method': 'recursion', 'kind': 'individual'},
"The 'recursion' method only applies when 'kind' is set to 'average'"),
(LinearRegression(),
{'features': [0], 'method': 'recursion', 'kind': 'both'},
"The 'recursion' method only applies when 'kind' is set to 'average'"),
(LinearRegression(),
{'features': [0], 'method': 'recursion'},
"Only the following estimators support the 'recursion' method:")]
)
def test_partial_dependence_error(estimator, params, err_msg):
X, y = make_classification(random_state=0)
estimator.fit(X, y)
with pytest.raises(ValueError, match=err_msg):
partial_dependence(estimator, X, **params)
@pytest.mark.parametrize(
"with_dataframe, err_msg",
[(True, "Only array-like or scalar are supported"),
(False, "Only array-like or scalar are supported")]
)
def test_partial_dependence_slice_error(with_dataframe, err_msg):
X, y = make_classification(random_state=0)
if with_dataframe:
pd = pytest.importorskip('pandas')
X = pd.DataFrame(X)
estimator = LogisticRegression().fit(X, y)
with pytest.raises(TypeError, match=err_msg):
partial_dependence(estimator, X, features=slice(0, 2, 1))
@pytest.mark.parametrize(
'estimator',
[LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
@pytest.mark.parametrize('features', [-1, 10000])
def test_partial_dependence_unknown_feature_indices(estimator, features):
X, y = make_classification(random_state=0)
estimator.fit(X, y)
err_msg = 'all features must be in'
with pytest.raises(ValueError, match=err_msg):
partial_dependence(estimator, X, [features])
@pytest.mark.parametrize(
'estimator',
[LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
def test_partial_dependence_unknown_feature_string(estimator):
pd = pytest.importorskip("pandas")
X, y = make_classification(random_state=0)
df = pd.DataFrame(X)
estimator.fit(df, y)
features = ['random']
err_msg = 'A given column is not a column of the dataframe'
with pytest.raises(ValueError, match=err_msg):
partial_dependence(estimator, df, features)
@pytest.mark.parametrize(
'estimator',
[LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
def test_partial_dependence_X_list(estimator):
# check that array-like objects are accepted
X, y = make_classification(random_state=0)
estimator.fit(X, y)
partial_dependence(estimator, list(X), [0], kind='average')
def test_warning_recursion_non_constant_init():
# make sure that passing a non-constant init parameter to a GBDT and using
# recursion method yields a warning.
gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)
gbc.fit(X, y)
with pytest.warns(
UserWarning,
match='Using recursion method with a non-constant init predictor'):
partial_dependence(gbc, X, [0], method='recursion', kind='average')
with pytest.warns(
UserWarning,
match='Using recursion method with a non-constant init predictor'):
partial_dependence(gbc, X, [0], method='recursion', kind='average')
def test_partial_dependence_sample_weight():
# Test near perfect correlation between partial dependence and diagonal
# when sample weights emphasize y = x predictions
# non-regression test for #13193
# TODO: extend to HistGradientBoosting once sample_weight is supported
N = 1000
rng = np.random.RandomState(123456)
mask = rng.randint(2, size=N, dtype=bool)
x = rng.rand(N)
# set y = x on mask and y = -x outside
y = x.copy()
y[~mask] = -y[~mask]
X = np.c_[mask, x]
# sample weights to emphasize data points where y = x
sample_weight = np.ones(N)
sample_weight[mask] = 1000.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(X, y, sample_weight=sample_weight)
pdp = partial_dependence(clf, X, features=[1], kind='average')
assert np.corrcoef(pdp['average'], pdp["values"])[0, 1] > 0.99
def test_hist_gbdt_sw_not_supported():
# TODO: remove/fix when PDP supports HGBT with sample weights
clf = HistGradientBoostingRegressor(random_state=1)
clf.fit(X, y, sample_weight=np.ones(len(X)))
with pytest.raises(NotImplementedError,
match="does not support partial dependence"):
partial_dependence(clf, X, features=[1])
def test_partial_dependence_pipeline():
    # check that partial dependence supports pipelines
iris = load_iris()
scaler = StandardScaler()
clf = DummyClassifier(random_state=42)
pipe = make_pipeline(scaler, clf)
clf.fit(scaler.fit_transform(iris.data), iris.target)
pipe.fit(iris.data, iris.target)
features = 0
pdp_pipe = partial_dependence(
pipe, iris.data, features=[features], grid_resolution=10,
kind='average'
)
pdp_clf = partial_dependence(
clf, scaler.transform(iris.data), features=[features],
grid_resolution=10, kind='average'
)
assert_allclose(pdp_pipe['average'], pdp_clf['average'])
assert_allclose(
pdp_pipe["values"][0],
pdp_clf["values"][0] * scaler.scale_[features] + scaler.mean_[features]
)
@pytest.mark.parametrize(
"estimator",
[LogisticRegression(max_iter=1000, random_state=0),
GradientBoostingClassifier(random_state=0, n_estimators=5)],
ids=['estimator-brute', 'estimator-recursion']
)
@pytest.mark.parametrize(
"preprocessor",
[None,
make_column_transformer(
(StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
(RobustScaler(), [iris.feature_names[i] for i in (1, 3)])),
make_column_transformer(
(StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
remainder='passthrough')],
ids=['None', 'column-transformer', 'column-transformer-passthrough']
)
@pytest.mark.parametrize(
"features",
[[0, 2], [iris.feature_names[i] for i in (0, 2)]],
ids=['features-integer', 'features-string']
)
def test_partial_dependence_dataframe(estimator, preprocessor, features):
    # check that partial dependence supports dataframes and pipelines
# including a column transformer
pd = pytest.importorskip("pandas")
df = pd.DataFrame(iris.data, columns=iris.feature_names)
pipe = make_pipeline(preprocessor, estimator)
pipe.fit(df, iris.target)
pdp_pipe = partial_dependence(
pipe, df, features=features, grid_resolution=10, kind='average'
)
    # the column transformer will reorder the columns when transforming;
    # the feature indices are remapped below to make sure that we compute the
    # partial dependence of the right columns
if preprocessor is not None:
X_proc = clone(preprocessor).fit_transform(df)
features_clf = [0, 1]
else:
X_proc = df
features_clf = [0, 2]
clf = clone(estimator).fit(X_proc, iris.target)
pdp_clf = partial_dependence(
clf, X_proc, features=features_clf, method='brute', grid_resolution=10,
kind='average'
)
assert_allclose(pdp_pipe['average'], pdp_clf['average'])
if preprocessor is not None:
scaler = preprocessor.named_transformers_['standardscaler']
assert_allclose(
pdp_pipe["values"][1],
pdp_clf["values"][1] * scaler.scale_[1] + scaler.mean_[1]
)
else:
assert_allclose(pdp_pipe["values"][1], pdp_clf["values"][1])
@pytest.mark.parametrize(
"features, expected_pd_shape",
[(0, (3, 10)),
(iris.feature_names[0], (3, 10)),
([0, 2], (3, 10, 10)),
([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)),
([True, False, True, False], (3, 10, 10))],
ids=['scalar-int', 'scalar-str', 'list-int', 'list-str', 'mask']
)
def test_partial_dependence_feature_type(features, expected_pd_shape):
    # check all possible feature types supported in PDP
pd = pytest.importorskip("pandas")
df = pd.DataFrame(iris.data, columns=iris.feature_names)
preprocessor = make_column_transformer(
(StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
(RobustScaler(), [iris.feature_names[i] for i in (1, 3)])
)
pipe = make_pipeline(
preprocessor, LogisticRegression(max_iter=1000, random_state=0)
)
pipe.fit(df, iris.target)
pdp_pipe = partial_dependence(
pipe, df, features=features, grid_resolution=10, kind='average'
)
assert pdp_pipe['average'].shape == expected_pd_shape
assert len(pdp_pipe["values"]) == len(pdp_pipe['average'].shape) - 1
@pytest.mark.parametrize(
"estimator", [LinearRegression(), LogisticRegression(),
GradientBoostingRegressor(), GradientBoostingClassifier()]
)
def test_partial_dependence_unfitted(estimator):
X = iris.data
preprocessor = make_column_transformer(
(StandardScaler(), [0, 2]), (RobustScaler(), [1, 3])
)
pipe = make_pipeline(preprocessor, estimator)
with pytest.raises(NotFittedError, match="is not fitted yet"):
partial_dependence(pipe, X, features=[0, 2], grid_resolution=10)
with pytest.raises(NotFittedError, match="is not fitted yet"):
partial_dependence(estimator, X, features=[0, 2], grid_resolution=10)
@pytest.mark.parametrize('Estimator, data', [
(LinearRegression, multioutput_regression_data),
(LogisticRegression, binary_classification_data)])
def test_kind_average_and_average_of_individual(Estimator, data):
est = Estimator()
(X, y), n_targets = data
est.fit(X, y)
pdp_avg = partial_dependence(
est, X=X, features=[1, 2], kind='average'
)
pdp_ind = partial_dependence(
est, X=X, features=[1, 2], kind='individual'
)
avg_ind = np.mean(pdp_ind['individual'], axis=1)
assert_allclose(avg_ind, pdp_avg['average'])
def test_warning_for_kind_legacy():
est = LogisticRegression()
(X, y), n_targets = binary_classification_data
est.fit(X, y)
err_msg = ("A Bunch will be returned in place of 'predictions' from "
"version 0.26")
with pytest.warns(FutureWarning, match=err_msg):
partial_dependence(est, X=X, features=[1, 2])
with pytest.warns(FutureWarning, match=err_msg):
partial_dependence(est, X=X, features=[1, 2], kind='legacy')
| bsd-3-clause |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| unlicense |
prheenan/Research | Perkins/Projects/WetLab/Demos/PCR_Optimizations/2016_5_17_PCR_Optimization_ovh2.0/Main_PCR_Optimization.py | 1 | 5410 | # force floating point division. Can still use integer with //
from __future__ import division
# This file is used for importing the common utilities classes.
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../../../")
from Util import DilutionUtil
from PCR.Optimization.PCR_Opt_Analysis import PCR_Analyze_Objects
class Gradient:
MACHINE_TOUCHGENE = 0
MACHINE_BIORAD = 1
def __init__(self,Temperatures,ConcentrationYields,ConcentrationVolumes,
NumRepeats,Description,Machine,Date,NumVialsCombined=1):
self.Temperatures = np.array(Temperatures)
self.Concentrations = np.array(ConcentrationYields)
self.Volumes = np.array(ConcentrationVolumes)
self.Repeats = NumRepeats
self.Description = Description
self.Machine = Machine
self.Date = Date
self.NumVialsCombined = NumVialsCombined
def GetMachineName(self):
return "TouchGene" if self.Machine==Gradient.MACHINE_TOUCHGENE\
else "BioRad"
def GetYieldPer100uLTube(self):
return self.Volumes * self.Concentrations / self.NumVialsCombined
def run():
"""
Taken from notebook #2, pp 14
"""
GradientsObjs = \
[Gradient(Temperatures=[60.2,62.5,64.2,65.8],
ConcentrationYields=[76,62,40,10],
ConcentrationVolumes=35,
NumRepeats=30,
Description="Notebook#2, pp14",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="???"),
Gradient(Temperatures=[60.2,62.5,64.2,65.8],
ConcentrationYields=[110,100,70,30],
ConcentrationVolumes=35,
NumRepeats=35,
Description="Notebook#2, pp14",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="???"),
## 5/17/2016 data
# 35R
Gradient(Temperatures=[60,61.4,62.3,64],
ConcentrationYields=[104.1,95.1,96.7,75.7],
ConcentrationVolumes=35,
NumRepeats=35,
Description="",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="5/17/2016"),
#40R
Gradient(Temperatures=[60,61.4,62.3,64],
ConcentrationYields=[146.6,149.3,147.4,106.1],
ConcentrationVolumes=35,
NumRepeats=40,
Description="",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="5/17/2016"),
        # 5/23 data, only one temperature; pooled two vials
Gradient(Temperatures=[61.4],
ConcentrationYields=[464],
ConcentrationVolumes=35,
NumRepeats=40,
Description="",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="5/23/2016",
NumVialsCombined=2),
## 5/24 data, ibid
Gradient(Temperatures=[61.4],
ConcentrationYields=[350],
ConcentrationVolumes=35,
NumRepeats=40,
Description="",
Machine=Gradient.MACHINE_TOUCHGENE,
Date="5/24/2016",
NumVialsCombined=2),
## 5/26 data on the biorad
Gradient(Temperatures=[60,61.3,62.5,64],
ConcentrationYields=[155.2,86.7,41.5,50],
ConcentrationVolumes=35,
NumRepeats=35,
Description="",
Machine=Gradient.MACHINE_BIORAD,
Date="5/26/2016"),
Gradient(Temperatures=[60,61.3,62.5,64],
ConcentrationYields=[172,137,127,62.6],
ConcentrationVolumes=35,
NumRepeats=40,
Description="",
Machine=Gradient.MACHINE_BIORAD,
Date="5/26/2016"),
Gradient(Temperatures=[60,61.3],
ConcentrationYields=[55,44],
ConcentrationVolumes=35,
NumRepeats=40,
Description="Gradient of ovh-2.0,labelled. T_ann too high?",
Machine=Gradient.MACHINE_BIORAD,
Date="6/2/2016"),
Gradient(Temperatures=[58,60.5,62],
ConcentrationYields=[95,91,91],
ConcentrationVolumes=35,
NumRepeats=40,
Description="Gradient of ovh-2.0,labelled, with 45s ext",
Machine=Gradient.MACHINE_BIORAD,
Date="6/3/2016"),
Gradient(Temperatures=[58,60.5,62],
ConcentrationYields=[145,105,121],
ConcentrationVolumes=35,
NumRepeats=40,
Description="Gradient of ovh-2.0,spacer, with 45s ext",
Machine=Gradient.MACHINE_BIORAD,
Date="6/3/2016"),
Gradient(Temperatures=[60],
ConcentrationYields=[91.5],
ConcentrationVolumes=70*4, # 8 tubes into 4, diluted 2-fold
NumRepeats=40,
Description="Gradient of ovh-2.0,spacer, with 45s ext",
Machine=Gradient.MACHINE_BIORAD,
Date="6/6/2016")
]
PCR_Analyze_Objects(GradientsObjs,"Ovh2.0-Spacer")
if __name__ == "__main__":
run()
| gpl-3.0 |
dwysocki/ASTP-760 | notes/textbook/py/gr-ch8-exercises.py | 1 | 1576 | #!/usr/bin/env python3
from os import path
from sys import argv
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants.constants import c
def problem_3(dir, fmt):
c = 1.000e+00
G = 1.000e+00
M_s = 1.476e+03
M_e = 4.434e-03
R_s = 6.960e+08
R_e = 6.371e+06
AU = 1.496e+11
def phi(M, r):
return - G * M / r
def v(M, r):
return np.sqrt(-phi(M, r))
def a(M, r):
return phi(M, r) / r
print("P 8.3.")
print("a)")
print(" (i) phi =", phi(M_s, R_s))
print(" (ii) phi =", phi(M_s, AU))
print(" (iii) phi =", phi(M_e, R_e))
print(" (iv) v =", v(M_s, AU))
print()
print("b)")
print(" (Sun) a =", a(M_s, AU))
print(" (Earth) a =", a(M_e, R_e))
def problem_17(dir, fmt):
M_s = 1.476e+03
C = np.array([2.5e+6, 6.3e+6, 6.3e+7, 3.1e+8, 6.3e+9])
T = np.array([8.4e-3, 5.5e-2, 2.1e+0, 2.3e+1, 2.1e+3]) * c
def M(C, T):
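        # Editor's note (added for clarity, not in the original script): this is
        # Kepler's third law for a circular orbit in geometrized units (G = c = 1),
        # where Omega**2 = M / r**3 with r = C / (2*pi) and Omega = 2*pi / T,
        # giving M = C**3 / (2*pi*T**2); dividing by M_s expresses the result in
        # solar masses. The periods T above were multiplied by c to convert
        # seconds to meters, consistent with these units.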
return C**3 / (2 * np.pi * T**2) / M_s
M_bh_est = M(C, T)
fig, ax = plt.subplots()
ax.scatter(C, M_bh_est)
ax.set_xlabel("Circumference (m)")
ax.set_ylabel(r"$M_\bullet$ ($M_\odot$)")
ax.set_xlim([0, 7e9])
fig.savefig(path.join(dir, "ch8_problem_17b."+fmt))
plt.close(fig)
print("P 8.17.")
print("b)")
print("C_sat =", C[-1])
print("M_bh =", M_bh_est[-1])
problems = [
problem_3,
problem_17
]
def main(dir, fmt):
for problem in problems:
problem(dir, fmt)
if __name__ == "__main__":
exit(main(*argv[1:]))
| mit |
RRShieldsCutler/clusterpluck | clusterpluck/tools/h_clustering.py | 1 | 2893 | #!/usr/bin/env Python
import argparse
import sys
import pandas as pd
import scipy.spatial.distance as ssd
from collections import defaultdict
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import linkage
from scipy.cluster.hierarchy import cut_tree
# The arg parser
def make_arg_parser():
parser = argparse.ArgumentParser(description='Performs hierarchical clustering on the percent identity BGC scores matrix')
parser.add_argument('-i', '--input', help='Input file: A percent identity (pident) scores matrix of strain_cluster comparisons', required=True)
parser.add_argument('-o', '--output', help='Where to save the output csv; default to screen', required=False, default='-')
parser.add_argument('-t', '--height', help='At what height to cut the tree, default is 0.3', required=False, default=0.3)
parser.add_argument('-c', '--ofu', help='Comma-separated list of the ofus (e.g. ofu00001,ofu00003) to reveal', required=False, type=str)
parser.add_argument('-m', '--method', help='What clustering method to use on the distance matrix: single, complete, average, weighted, centroid, median, ward.', required=False, default='complete')
return parser
# Perform the hierarchical clustering
def process_hierarchy(inf, h, method):
df = pd.read_csv(inf, header=0, index_col=0)
df = df.fillna(0)
strains = df.index
df = 1 - (df / 100)
df_v = ssd.squareform(df, force='tovector', checks=False) # flatten matrix to condensed distance vector
if method == 'single':
li = sch.single(df_v)
elif method == 'complete':
li = sch.complete(df_v)
elif method == 'average':
li = sch.average(df_v)
elif method == 'weighted':
li = sch.weighted(df_v)
else:
print('\nERROR: Please enter a valid clustering method\n')
sys.exit()
hclus = cut_tree(li, height=h) # using the height (percent ID as decimal, for example), cluster OFUs from dendrogram
hclus = pd.DataFrame(hclus, index=strains)
hclus.ix[:, 0] += 1 # cut_tree defaults to the first 'cluster' being named "0"; this just bumps all IDs +1
return hclus
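# Editor's illustrative sketch (not part of the original script): how
# process_hierarchy turns a tiny, made-up percent-identity matrix into OFU
# assignments. The strain names and scores below are hypothetical.
def _example_process_hierarchy():
    import io
    toy = io.StringIO(
        ",strainA,strainB,strainC\n"
        "strainA,100,95,20\n"
        "strainB,95,100,25\n"
        "strainC,20,25,100\n")
    # cutting the tree at height 0.3 groups strains sharing >= 70% identity,
    # so strainA and strainB share an OFU while strainC gets its own
    return process_hierarchy(toy, h=0.3, method='complete')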
def bgcs_in_ofu(ofus, hclus):
dd = defaultdict(list)
for value, key in hclus.itertuples(index=True):
key = str('%05d' % key)
dd[key].extend(list([value]))
ofu_list = ofus.split(',')
for ofu in ofu_list:
ofu = str(ofu)
if ofu.startswith('ofu'):
ofu_n = str(ofu.replace('ofu', ''))
bgcs = dd[ofu_n]
print('\nThis ofu cluster, %s, contains the following BGCs:' % ofu)
print(bgcs, '\n')
return None
def main():
parser = make_arg_parser()
args = parser.parse_args()
# Parse command line
method = str(args.method)
with open(args.input, 'r') as inf:
h = float(args.height)
hclus = process_hierarchy(inf, h, method)
if args.ofu:
ofus = args.ofu
bgcs_in_ofu(ofus, hclus)
sys.exit()
with open(args.output, 'w') if args.output != '-' else sys.stdout as outf:
hclus.to_csv(outf)
if __name__ == '__main__':
main()
| mit |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_dates.py | 7 | 13831 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import map
import datetime
import warnings
import tempfile
import dateutil
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_raises, assert_equal
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
@image_comparison(baseline_images=['date_empty'], extensions=['png'])
def test_date_empty():
# make sure mpl does the right thing when told to plot dates even
# if no date data has been presented, cf
# http://sourceforge.net/tracker/?func=detail&aid=2850075&group_id=80706&atid=560720
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis_date()
@image_comparison(baseline_images=['date_axhspan'], extensions=['png'])
def test_date_axhspan():
# test ax hspan with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvspan'], extensions=['png'])
def test_date_axvspan():
    # test ax vspan with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2010, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvspan(t0, tf, facecolor="blue", alpha=0.25)
ax.set_xlim(t0 - datetime.timedelta(days=720),
tf + datetime.timedelta(days=720))
fig.autofmt_xdate()
@image_comparison(baseline_images=['date_axhline'],
extensions=['png'])
def test_date_axhline():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.subplots_adjust(left=0.25)
@image_comparison(baseline_images=['date_axvline'], tol=16,
extensions=['png'])
def test_date_axvline():
    # test ax vline with date inputs
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 21)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axvline(t0, color="red", lw=3)
ax.set_xlim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
fig.autofmt_xdate()
@cleanup
def test_too_many_date_ticks():
# Attempt to test SF 2715172, see
# https://sourceforge.net/tracker/?func=detail&aid=2715172&group_id=80706&atid=560720
# setting equal datetimes triggers and expander call in
# transforms.nonsingular which results in too many ticks in the
# DayLocator. This should trigger a Locator.MAXTICKS RuntimeError
warnings.filterwarnings(
'ignore',
'Attempting to set identical left==right results\\nin singular '
'transformations; automatically expanding.\\nleft=\d*\.\d*, '
'right=\d*\.\d*',
UserWarning, module='matplotlib.axes')
t0 = datetime.datetime(2000, 1, 20)
tf = datetime.datetime(2000, 1, 20)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim((t0, tf), auto=True)
ax.plot([], [])
ax.xaxis.set_major_locator(mdates.DayLocator())
assert_raises(RuntimeError, fig.savefig, 'junk.png')
@image_comparison(baseline_images=['RRuleLocator_bounds'], extensions=['png'])
def test_RRuleLocator():
import matplotlib.testing.jpl_units as units
units.register()
# This will cause the RRuleLocator to go out of bounds when it tries
# to add padding to the limits, so we make sure it caps at the correct
# boundary values.
t0 = datetime.datetime(1000, 1, 1)
tf = datetime.datetime(6000, 1, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
rrule = mdates.rrulewrapper(dateutil.rrule.YEARLY, interval=500)
locator = mdates.RRuleLocator(rrule)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(mdates.AutoDateFormatter(locator))
ax.autoscale_view()
fig.autofmt_xdate()
@image_comparison(baseline_images=['DateFormatter_fractionalSeconds'],
extensions=['png'])
def test_DateFormatter():
import matplotlib.testing.jpl_units as units
units.register()
    # Let's make sure that DateFormatter will allow us to have tick marks
# at intervals of fractional seconds.
t0 = datetime.datetime(2001, 1, 1, 0, 0, 0)
tf = datetime.datetime(2001, 1, 1, 0, 0, 1)
fig = plt.figure()
ax = plt.subplot(111)
ax.set_autoscale_on(True)
ax.plot([t0, tf], [0.0, 1.0], marker='o')
# rrule = mpldates.rrulewrapper( dateutil.rrule.YEARLY, interval=500 )
# locator = mpldates.RRuleLocator( rrule )
# ax.xaxis.set_major_locator( locator )
# ax.xaxis.set_major_formatter( mpldates.AutoDateFormatter(locator) )
ax.autoscale_view()
fig.autofmt_xdate()
def test_date_formatter_strftime():
"""
Tests that DateFormatter matches datetime.strftime,
check microseconds for years before 1900 for bug #3179
as well as a few related issues for years before 1900.
"""
def test_strftime_fields(dt):
"""For datetime object dt, check DateFormatter fields"""
# Note: the last couple of %%s are to check multiple %s are handled
# properly; %% should get replaced by %.
formatter = mdates.DateFormatter("%w %d %m %y %Y %H %I %M %S %%%f %%x")
# Compute date fields without using datetime.strftime,
# since datetime.strftime does not work before year 1900
formatted_date_str = (
"{weekday} {day:02d} {month:02d} {year:02d} {full_year:04d} "
"{hour24:02d} {hour12:02d} {minute:02d} {second:02d} "
"%{microsecond:06d} %x"
.format(
# weeknum=dt.isocalendar()[1], # %U/%W {weeknum:02d}
# %w Sunday=0, weekday() Monday=0
weekday=str((dt.weekday() + 1) % 7),
day=dt.day,
month=dt.month,
year=dt.year % 100,
full_year=dt.year,
hour24=dt.hour,
hour12=((dt.hour-1) % 12) + 1,
minute=dt.minute,
second=dt.second,
microsecond=dt.microsecond))
assert_equal(formatter.strftime(dt), formatted_date_str)
try:
# Test strftime("%x") with the current locale.
import locale # Might not exist on some platforms, such as Windows
locale_formatter = mdates.DateFormatter("%x")
locale_d_fmt = locale.nl_langinfo(locale.D_FMT)
expanded_formatter = mdates.DateFormatter(locale_d_fmt)
assert_equal(locale_formatter.strftime(dt),
expanded_formatter.strftime(dt))
except (ImportError, AttributeError):
pass
for year in range(1, 3000, 71):
# Iterate through random set of years
test_strftime_fields(datetime.datetime(year, 1, 1))
test_strftime_fields(datetime.datetime(year, 2, 3, 4, 5, 6, 12345))
def test_date_formatter_callable():
scale = -11
locator = mock.Mock(_get_unit=mock.Mock(return_value=scale))
callable_formatting_function = (lambda dates, _:
[dt.strftime('%d-%m//%Y') for dt in dates])
formatter = mdates.AutoDateFormatter(locator)
formatter.scaled[-10] = callable_formatting_function
assert_equal(formatter([datetime.datetime(2014, 12, 25)]),
['25-12//2014'])
def test_drange():
"""
This test should check if drange works as expected, and if all the
rounding errors are fixed
"""
start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
delta = datetime.timedelta(hours=1)
# We expect 24 values in drange(start, end, delta), because drange returns
    # dates from a half-open interval [start, end)
assert_equal(24, len(mdates.drange(start, end, delta)))
# if end is a little bit later, we expect the range to contain one element
# more
end = end + datetime.timedelta(microseconds=1)
assert_equal(25, len(mdates.drange(start, end, delta)))
# reset end
end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)
    # and test drange with "complicated" floats:
    # 4 hours = 1/6 day, this is a "dangerous" float
delta = datetime.timedelta(hours=4)
daterange = mdates.drange(start, end, delta)
assert_equal(6, len(daterange))
assert_equal(mdates.num2date(daterange[-1]), end - delta)
@cleanup
def test_empty_date_with_year_formatter():
# exposes sf bug 2861426:
# https://sourceforge.net/tracker/?func=detail&aid=2861426&group_id=80706&atid=560720
    # update: I no longer believe this is a bug, as I commented on
# the tracker. The question is now: what to do with this test
import matplotlib.dates as dates
fig = plt.figure()
ax = fig.add_subplot(111)
yearFmt = dates.DateFormatter('%Y')
ax.xaxis.set_major_formatter(yearFmt)
with tempfile.TemporaryFile() as fh:
assert_raises(ValueError, fig.savefig, fh)
def test_auto_date_locator():
def _create_auto_date_locator(date1, date2):
locator = mdates.AutoDateLocator()
locator.create_dummy_axis()
locator.set_view_interval(mdates.date2num(date1),
mdates.date2num(date2))
return locator
d1 = datetime.datetime(1990, 1, 1)
results = ([datetime.timedelta(weeks=52 * 200),
['1990-01-01 00:00:00+00:00', '2010-01-01 00:00:00+00:00',
'2030-01-01 00:00:00+00:00', '2050-01-01 00:00:00+00:00',
'2070-01-01 00:00:00+00:00', '2090-01-01 00:00:00+00:00',
'2110-01-01 00:00:00+00:00', '2130-01-01 00:00:00+00:00',
'2150-01-01 00:00:00+00:00', '2170-01-01 00:00:00+00:00']
],
[datetime.timedelta(weeks=52),
['1990-01-01 00:00:00+00:00', '1990-02-01 00:00:00+00:00',
'1990-03-01 00:00:00+00:00', '1990-04-01 00:00:00+00:00',
'1990-05-01 00:00:00+00:00', '1990-06-01 00:00:00+00:00',
'1990-07-01 00:00:00+00:00', '1990-08-01 00:00:00+00:00',
'1990-09-01 00:00:00+00:00', '1990-10-01 00:00:00+00:00',
'1990-11-01 00:00:00+00:00', '1990-12-01 00:00:00+00:00']
],
[datetime.timedelta(days=141),
['1990-01-05 00:00:00+00:00', '1990-01-26 00:00:00+00:00',
'1990-02-16 00:00:00+00:00', '1990-03-09 00:00:00+00:00',
'1990-03-30 00:00:00+00:00', '1990-04-20 00:00:00+00:00',
'1990-05-11 00:00:00+00:00']
],
[datetime.timedelta(days=40),
['1990-01-03 00:00:00+00:00', '1990-01-10 00:00:00+00:00',
'1990-01-17 00:00:00+00:00', '1990-01-24 00:00:00+00:00',
'1990-01-31 00:00:00+00:00', '1990-02-07 00:00:00+00:00']
],
[datetime.timedelta(hours=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 04:00:00+00:00',
'1990-01-01 08:00:00+00:00', '1990-01-01 12:00:00+00:00',
'1990-01-01 16:00:00+00:00', '1990-01-01 20:00:00+00:00',
'1990-01-02 00:00:00+00:00', '1990-01-02 04:00:00+00:00',
'1990-01-02 08:00:00+00:00', '1990-01-02 12:00:00+00:00',
'1990-01-02 16:00:00+00:00']
],
[datetime.timedelta(minutes=20),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:05:00+00:00',
'1990-01-01 00:10:00+00:00', '1990-01-01 00:15:00+00:00',
'1990-01-01 00:20:00+00:00']
],
[datetime.timedelta(seconds=40),
['1990-01-01 00:00:00+00:00', '1990-01-01 00:00:05+00:00',
'1990-01-01 00:00:10+00:00', '1990-01-01 00:00:15+00:00',
'1990-01-01 00:00:20+00:00', '1990-01-01 00:00:25+00:00',
'1990-01-01 00:00:30+00:00', '1990-01-01 00:00:35+00:00',
'1990-01-01 00:00:40+00:00']
],
[datetime.timedelta(microseconds=1500),
['1989-12-31 23:59:59.999507+00:00',
'1990-01-01 00:00:00+00:00',
'1990-01-01 00:00:00.000502+00:00',
'1990-01-01 00:00:00.001005+00:00',
'1990-01-01 00:00:00.001508+00:00']
],
)
for t_delta, expected in results:
d2 = d1 + t_delta
locator = _create_auto_date_locator(d1, d2)
assert_equal(list(map(str, mdates.num2date(locator()))),
expected)
@image_comparison(baseline_images=['date_inverted_limit'],
extensions=['png'])
def test_date_inverted_limit():
# test ax hline with date inputs
t0 = datetime.datetime(2009, 1, 20)
tf = datetime.datetime(2009, 1, 31)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.axhline(t0, color="blue", lw=3)
ax.set_ylim(t0 - datetime.timedelta(days=5),
tf + datetime.timedelta(days=5))
ax.invert_yaxis()
fig.subplots_adjust(left=0.25)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
walterreade/scikit-learn | sklearn/utils/validation.py | 19 | 25724 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
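# Editor's note: a minimal usage sketch for as_float_array (not part of the
# original module). An integer array is upcast to a floating point dtype and
# the input array is left untouched.
def _example_as_float_array():
    X_int = np.array([[1, 2], [3, 4]], dtype=np.int64)
    X_float = as_float_array(X_int)
    return X_float.dtype  # float64; an int32 input would give float32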
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
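# Editor's note: a small usage sketch for indexable (not part of the original
# module). Sparse input is converted to CSR, anything already indexable (such
# as a plain list) passes through, and lengths are checked for consistency.
def _example_indexable():
    X_sparse = sp.coo_matrix(np.eye(3))
    y_list = [0, 1, 2]
    X_idx, y_idx = indexable(X_sparse, y_list)
    return X_idx.format, len(y_idx)  # ('csr', 3)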
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
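# Editor's note: a brief usage sketch for check_array (not part of the
# original module). A nested list is validated and returned as a 2D ndarray;
# sparse input is accepted only when an allowed format is listed.
def _example_check_array():
    X = check_array([[1, 2, 3], [4, 5, 6]])
    X_sparse = check_array(sp.csr_matrix(X), accept_sparse=['csr', 'csc'])
    return X.shape, X_sparse.format  # (2, 3), 'csr'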
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
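# Editor's note: a short sketch of the three accepted seed types (not part of
# the original module). Every call below yields a usable RandomState.
def _example_check_random_state():
    rs_default = check_random_state(None)           # global np.random singleton
    rs_seeded = check_random_state(42)              # new RandomState seeded with 42
    rs_passthrough = check_random_state(rs_seeded)  # returned unchanged
    return rs_seeded is rs_passthrough  # True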
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
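# Editor's note: an illustrative sketch for check_symmetric (not part of the
# original module). A slightly asymmetric matrix is averaged with its
# transpose; the conversion warning is silenced to keep the sketch quiet.
def _example_check_symmetric():
    A = np.array([[0.0, 1.0], [1.0 + 1e-6, 0.0]])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        A_sym = check_symmetric(A, tol=1e-10)
    return np.allclose(A_sym, A_sym.T)  # True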
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
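# Editor's note: a minimal sketch of check_is_fitted (not part of the original
# module). The hypothetical estimator below only gains `coef_` after fit, so
# the check raises NotFittedError beforehand and passes silently afterwards.
def _example_check_is_fitted():
    class TinyEstimator(object):
        def fit(self, X, y=None):
            self.coef_ = np.zeros(np.asarray(X).shape[1])
            return self
    est = TinyEstimator()
    try:
        check_is_fitted(est, 'coef_')
        raised = False
    except _NotFittedError:
        raised = True
    est.fit([[0.0, 1.0]])
    check_is_fitted(est, 'coef_')
    return raised  # True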
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/tsa/filters/_utils.py | 29 | 4391 | from functools import wraps
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.base import datetools
from statsmodels.tsa.tsatools import freq_to_period
def _get_pandas_wrapper(X, trim_head=None, trim_tail=None, names=None):
index = X.index
    # TODO: allow using index labels
if trim_head is None and trim_tail is None:
index = index
elif trim_tail is None:
index = index[trim_head:]
elif trim_head is None:
index = index[:-trim_tail]
else:
index = index[trim_head:-trim_tail]
if hasattr(X, "columns"):
if names is None:
names = X.columns
return lambda x : X.__class__(x, index=index, columns=names)
else:
if names is None:
names = X.name
return lambda x : X.__class__(x, index=index, name=names)
def _maybe_get_pandas_wrapper(X, trim_head=None, trim_tail=None):
"""
    If using pandas, returns a function to wrap the results, e.g., wrapper(X);
    otherwise returns None.
    trim is an integer for the symmetric truncation of the series in some
    filters.
"""
if _is_using_pandas(X, None):
return _get_pandas_wrapper(X, trim_head, trim_tail)
else:
return
def _maybe_get_pandas_wrapper_freq(X, trim=None):
if _is_using_pandas(X, None):
index = X.index
func = _get_pandas_wrapper(X, trim)
freq = index.inferred_freq
return func, freq
else:
return lambda x : x, None
def pandas_wrapper(func, trim_head=None, trim_tail=None, names=None, *args,
**kwargs):
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
names)
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
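# Editor's note: a small usage sketch for pandas_wrapper (not part of the
# original module). The wrapped function receives the pandas object and its
# ndarray result is re-wrapped with the original index.
def _example_pandas_wrapper():
    import numpy as np
    import pandas as pd
    s = pd.Series([1.0, 2.0, 3.0],
                  index=pd.date_range('2000-01-01', periods=3, freq='D'))
    doubled = pandas_wrapper(lambda x: np.asarray(x) * 2)(s)
    return doubled.index.equals(s.index)  # True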
def pandas_wrapper_bunch(func, trim_head=None, trim_tail=None,
names=None, *args, **kwargs):
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
names)
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
def pandas_wrapper_predict(func, trim_head=None, trim_tail=None,
columns=None, *args, **kwargs):
pass
def pandas_wrapper_freq(func, trim_head=None, trim_tail=None,
freq_kw='freq', columns=None, *args, **kwargs):
"""
Return a new function that catches the incoming X, checks if it's pandas,
    calls the function as is. Then wraps the results in the incoming index.
Deals with frequencies. Expects that the function returns a tuple,
a Bunch object, or a pandas-object.
"""
@wraps(func)
def new_func(X, *args, **kwargs):
# quick pass-through for do nothing case
if not _is_using_pandas(X, None):
return func(X, *args, **kwargs)
wrapper_func = _get_pandas_wrapper(X, trim_head, trim_tail,
columns)
index = X.index
freq = index.inferred_freq
kwargs.update({freq_kw : freq_to_period(freq)})
ret = func(X, *args, **kwargs)
ret = wrapper_func(ret)
return ret
return new_func
def dummy_func(X):
return X
def dummy_func_array(X):
return X.values
def dummy_func_pandas_columns(X):
return X.values
def dummy_func_pandas_series(X):
return X['A']
import pandas as pd
import numpy as np
def test_pandas_freq_decorator():
X = pd.util.testing.makeDataFrame()
    # given X, get a function back that returns an X with the same columns
func = pandas_wrapper(dummy_func)
np.testing.assert_equal(func(X.values), X)
func = pandas_wrapper(dummy_func_array)
pd.util.testing.assert_frame_equal(func(X), X)
expected = X.rename(columns=dict(zip('ABCD', 'EFGH')))
func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
pd.util.testing.assert_frame_equal(func(X), expected)
| bsd-3-clause |
Archman/beamline | tests/test_datautils.py | 1 | 4363 | import beamline
import numpy as np
import unittest
import os
class DataUtilsTest(unittest.TestCase):
def setUp(self):
datafields = ['s','Sx','Sy','enx', 'eny']
sddsfile = 'test.sig'
hdf5file = 'test.h5'
package_path = os.path.join(*os.path.split(beamline.__path__[0])[:-1])
datascript = os.path.join(package_path, 'scripts/sddsprintdata.sh')
datapath = os.path.join(package_path, 'tests/tracking')
self.hdf5fullpath = os.path.join(os.path.expanduser(datapath), hdf5file)
self.sddsfullpath = os.path.join(os.path.expanduser(datapath), sddsfile)
self.A = beamline.DataExtracter(self.sddsfullpath, *datafields)
self.A.setDataScript(datascript)
self.A.setDataPath (datapath)
self.A.setH5file (self.hdf5fullpath)
self.s = np.array([[0.], [0.], [1.], [1.5], [2.5], [3.], [4.], [4.5],
[5.5], [6.5], [7.], [8.], [8.5], [9.5], [10.], [11.]])
self.sNamelist = [['0.0', 'MARK'], ['0.0', 'CHARGE'], ['1.0', 'DRIF'], ['1.5', 'QUAD'],
['2.5', 'DRIF'], ['3.0', 'CSRCSBEND'], ['4.0', 'DRIF'], ['4.5', 'CSRCSBEND'],
['5.5', 'DRIF'], ['6.5', 'DRIF'], ['7.0', 'CSRCSBEND'], ['8.0', 'DRIF'],
['8.5', 'CSRCSBEND'], ['9.5', 'DRIF'], ['10.0', 'QUAD'], ['11.0', 'DRIF']]
self.colnames = ['s', 'ElementName', 'ElementOccurence', 'ElementType',
's1', 's12', 's13', 's14', 's15', 's16', 's17',
's2', 's23', 's24', 's25', 's26', 's27',
's3', 's34', 's35', 's36', 's37',
's4', 's45', 's46', 's47',
's5', 's56', 's57',
's6', 's67',
's7',
'ma1', 'ma2', 'ma3', 'ma4', 'ma5', 'ma6', 'ma7',
'minimum1', 'minimum2', 'minimum3', 'minimum4', 'minimum5', 'minimum6', 'minimum7',
'maximum1', 'maximum2', 'maximum3', 'maximum4', 'maximum5', 'maximum6', 'maximum7',
'Sx', 'Sxp', 'Sy', 'Syp', 'Ss', 'Sdelta', 'St',
'ex', 'enx', 'ecx', 'ecnx', 'ey', 'eny', 'ecy', 'ecny',
'betaxBeam', 'alphaxBeam', 'betayBeam', 'alphayBeam']
def test_extractData(self):
self.A.kwslist = ['s']
ret1 = self.A.extractData()
self.assertIsInstance(ret1, beamline.datautils.DataExtracter)
self.assertIsInstance(ret1.h5data, np.ndarray)
self.assertListEqual(list(ret1.h5data), list(self.s))
self.A.kwslist = ['s', 'ElementType']
ret2 = self.A.extractData()
self.assertListEqual(ret2.h5data.tolist(), self.sNamelist)
ret3 = beamline.datautils.DataExtracter(self.sddsfullpath, *('s'))
self.assertIsInstance(ret3, beamline.datautils.DataExtracter)
ret3.extractData()
self.assertListEqual(list(ret3.h5data), list(self.s))
def test_getAllPars(self):
ret = self.A.getAllPars()
self.assertListEqual(ret, ['Step'])
def test_getAllCols(self):
ret = self.A.getAllCols()
self.assertListEqual(ret, self.colnames)
class GetAllColsTest(DataUtilsTest):
def runTest(self):
self.assertEqual(self.A.getAllCols(), ['s', 'ElementName', 'ElementOccurence', 'ElementType', 's1', 's12', 's13', 's14', 's15', 's16', 's17', 's2', 's23', 's24', 's25', 's26', 's27', 's3', 's34', 's35', 's36', 's37', 's4', 's45', 's46', 's47', 's5', 's56', 's57', 's6', 's67', 's7', 'ma1', 'ma2', 'ma3', 'ma4', 'ma5', 'ma6', 'ma7', 'minimum1', 'minimum2', 'minimum3', 'minimum4', 'minimum5', 'minimum6', 'minimum7', 'maximum1', 'maximum2', 'maximum3', 'maximum4', 'maximum5', 'maximum6', 'maximum7', 'Sx', 'Sxp', 'Sy', 'Syp', 'Ss', 'Sdelta', 'St', 'ex', 'enx', 'ecx', 'ecnx', 'ey', 'eny', 'ecy', 'ecny', 'betaxBeam', 'alphaxBeam', 'betayBeam', 'alphayBeam'])
class WorkflowTest(DataUtilsTest):
def runTest(self):
self.A.extractData().dump()
import h5py
fd = h5py.File(self.hdf5fullpath, 'r')
d_s = fd['s'][:]
d_sx = fd['Sx'][:]
import matplotlib.pyplot as plt
plt.figure(1)
plt.plot(d_s, d_sx, 'r-')
plt.show()
if __name__ == '__main__':
unittest.main()
| mit |
atreyv/sympy | examples/intermediate/sample.py | 107 | 3494 | """
Utility functions for plotting sympy functions.
See examples\mplot2d.py and examples\mplot3d.py for usable 2d and 3d
graphing functions using matplotlib.
"""
from sympy.core.sympify import sympify, SympifyError
from sympy.external import import_module
np = import_module('numpy')
def sample2d(f, x_args):
"""
Samples a 2d function f over specified intervals and returns two
arrays (X, Y) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot2d.py.
f is a function of one variable, such as x**2.
x_args is an interval given in the form (var, min, max, n)
"""
try:
f = sympify(f)
except SympifyError:
        raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
    except (TypeError, ValueError):
raise ValueError("x_args must be a tuple of the form (var, min, max, n)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
X = np.arange(float(x_min), float(x_max) + x_d, x_d)
Y = np.empty(len(X))
for i in range(len(X)):
try:
Y[i] = float(f.subs(x, X[i]))
except TypeError:
Y[i] = None
return X, Y
def sample3d(f, x_args, y_args):
"""
Samples a 3d function f over specified intervals and returns three
2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib)
syntax. See examples\mplot3d.py.
f is a function of two variables, such as x**2 + y**2.
x_args and y_args are intervals given in the form (var, min, max, n)
"""
x, x_min, x_max, x_n = None, None, None, None
y, y_min, y_max, y_n = None, None, None, None
try:
f = sympify(f)
except SympifyError:
raise ValueError("f could not be interpreted as a SymPy function")
try:
x, x_min, x_max, x_n = x_args
y, y_min, y_max, y_n = y_args
    except (TypeError, ValueError):
raise ValueError("x_args and y_args must be tuples of the form (var, min, max, intervals)")
x_l = float(x_max - x_min)
x_d = x_l/float(x_n)
x_a = np.arange(float(x_min), float(x_max) + x_d, x_d)
y_l = float(y_max - y_min)
y_d = y_l/float(y_n)
y_a = np.arange(float(y_min), float(y_max) + y_d, y_d)
def meshgrid(x, y):
"""
Taken from matplotlib.mlab.meshgrid.
"""
x = np.array(x)
y = np.array(y)
numRows, numCols = len(y), len(x)
x.shape = 1, numCols
X = np.repeat(x, numRows, 0)
y.shape = numRows, 1
Y = np.repeat(y, numCols, 1)
return X, Y
X, Y = np.meshgrid(x_a, y_a)
Z = np.ndarray((len(X), len(X[0])))
for j in range(len(X)):
for k in range(len(X[0])):
try:
Z[j][k] = float(f.subs(x, X[j][k]).subs(y, Y[j][k]))
except (TypeError, NotImplementedError):
Z[j][k] = 0
return X, Y, Z
def sample(f, *var_args):
"""
Samples a 2d or 3d function over specified intervals and returns
a dataset suitable for plotting with matlab (matplotlib) syntax.
Wrapper for sample2d and sample3d.
f is a function of one or two variables, such as x**2.
var_args are intervals for each variable given in the form (var, min, max, n)
"""
if len(var_args) == 1:
return sample2d(f, var_args[0])
elif len(var_args) == 2:
return sample3d(f, var_args[0], var_args[1])
else:
raise ValueError("Only 2d and 3d sampling are supported at this time.")
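# Hedged usage sketch (added for illustration; not part of the original
# example module).  Samples x**2 on [-3, 3] and x**2 + y**2 on a 2-D grid,
# the way examples\mplot2d.py and examples\mplot3d.py would before handing
# the arrays to matplotlib.
def _example_sample_usage():
    from sympy.abc import x, y
    X2d, Y2d = sample(x**2, (x, -3, 3, 60))
    X3d, Y3d, Z3d = sample(x**2 + y**2, (x, -3, 3, 30), (y, -3, 3, 30))
    return (X2d, Y2d), (X3d, Y3d, Z3d)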
| bsd-3-clause |
PeRDy/performance-tools | performance_tools/times.py | 1 | 3646 | from collections import OrderedDict
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
class Distribution(object):
def __init__(self, data, spurious=0.1):
"""Store time series and remove spurious data.
:param data: Time series data.
:type data: numpy.array
:param spurious: Spurious data coefficient.
:type spurious: float
"""
        # reject invalid coefficients first, then decide whether to trim
        if spurious < 0 or spurious > 1:
            raise AttributeError
        elif spurious < 1:
            self.data = self._remove_spurious(data, spurious)
        else:
            self.data = np.sort(data)
self.mu, self.std, self.median, self.max, self.min = self._statistical_data()
def _remove_spurious(self, data, spurious=0.1):
spurious_coefficient = spurious / 2
num_spurious = int(len(data) * spurious_coefficient)
return np.sort(data)[num_spurious:-num_spurious]
def _statistical_data(self):
mu, std = stats.norm.fit(self.data)
        median = self.data[len(self.data) // 2]
max_ = self.data[-1]
min_ = self.data[0]
return mu, std, median, max_, min_
def plot(self, normal=True, pareto=True):
"""Plot data.
:param normal: If true, plot normal distribution.
:type normal: bool
:param pareto: If true, plot pareto distribution (80-20 law).
:type pareto: bool
"""
if pareto:
pareto = 1.161
# Plot the histogram.
plt.hist(self.data, bins=25, normed=True, alpha=0.6, color='g')
# Plot normal PDF.
xmin, xmax = plt.xlim()
grid_granularity = 100 if len(self.data) > 100 else len(self.data)
x = np.linspace(xmin, xmax, grid_granularity)
if normal:
norm_pdf = stats.norm.pdf(x, self.mu, self.std)
plt.plot(x, norm_pdf, 'k', linewidth=2, label='Normal')
# Plot pareto PDF.
if pareto:
pareto_pdf = stats.pareto.pdf(x, pareto)
plt.plot(x, pareto_pdf, 'r', linewidth=2, label='Pareto')
title = "Fit results: mu = %.2f, std = %.2f" % (self.mu, self.std)
plt.title(title)
plt.legend()
plt.show()
def __repr__(self):
return "Max: {}\nMin: {}\nMean: {}\nStandard Deviation: {}\nMedian: {}".format(
self.max, self.min, self.mu, self.std, self.median
)
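# Hedged usage sketch (added for illustration; not part of the original
# module).  Builds a synthetic response-time sample and summarises it after
# dropping 10% spurious measurements.
def _example_distribution_usage():
    import numpy as np
    times = np.random.lognormal(mean=-0.5, sigma=0.6, size=1000)
    dist = Distribution(times, spurious=0.1)
    print(dist)    # max / min / mean / std / median summary
    # dist.plot()  # histogram with the fitted normal (and pareto) overlaid
    return dist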
class Classification(object):
def __init__(self, data, **classes):
"""Classify time series.
:param data: Time series data.
:type data: numpy.array
:keyword classes: Classes and values.
"""
if len(classes) == 0:
classes = {
'excellent': 0.4,
'good': 1.0,
'ok': 1.5,
'bad': 3.0,
'ugly': None,
}
self.classes = OrderedDict(sorted(classes.items(), key=lambda t: t[1], reverse=True))
self.data = np.sort(data)
self.classified_data = self._classify()
def _classify(self):
result = {k: 0 for k in self.classes.keys()}
for data in self.data:
current_klass = None
for klass, max_value in ((k, v) for k, v in self.classes.items()):
if data < max_value or (current_klass is None and max_value is None):
current_klass = klass
result[current_klass] += 1
return result
def __repr__(self):
total = float(sum(self.classified_data.values())) / 100
return "\n".join(["{}: {:d} ({:.2f}%)".format(k, v, v / total) for (k, v) in self.classified_data.items()])
| gpl-2.0 |
helloTC/ATT | util/extcsv.py | 1 | 4170 | #!/usr/bin/env python
# coding=utf-8
import pandas as pd
import numpy as np
class OperateCSV(object):
"""
A class to help do operations on csv data
"""
def __init__(self, data):
"""
Initialize instance
Parameters:
-----------
data: csv file data path or pd.DataFrame
"""
if isinstance(data, str):
            assert data.endswith('csv'), "a .csv file should be provided"
self.rawdata = pd.read_csv(data)
elif isinstance(data, pd.DataFrame):
self.rawdata = data
else:
raise Exception('Please input a csv file name or a pandas DataFrame.')
def getkeys(self, data = None):
"""
Get all of the keys in rawdata
Parameters
----------
None
Returns:
---------
data_keys: a list contains keys
Examples:
---------
>>> self.getkeys()
"""
if data is None:
data = self.rawdata
return data.keys().tolist()
def getdata_from_keys(self, keys, data = None):
"""
Get data from a list of keys
Parameters:
-----------
        keys: a list of keys to be extracted
Return:
--------
pt_data: data as part of rawdata with specific series of keys
Example:
---------
>>> self.get_data_from_keys(['Subject', 'Gender'])
"""
if data is None:
data = self.rawdata
return data.loc[:, keys]
def get_data_by_row(self, source_key, source_value, data = None):
"""
Get data by row
Parameters:
-----------
        source_key: column name whose values are matched against source_value
        source_value: a list of values; rows whose source_key value is in this list are returned
Return:
--------
pt_data: data that rows contains source_value in key of source_key
Example:
---------
>>> self.get_data_by_row('Subject', ['100004', '308331'])
"""
if data is None:
data = self.rawdata
return data[data[source_key].isin(source_value)]
def find_values_by_key(self, dest_key, source_key, source_value, data = None):
"""
Find values in dest_key from source_value in source_key
Parameters:
-----------
dest_key: destination key, a string. Values got from this column
source_key: source key, a string.
source_value: value as match condition to find values in destination key
Returns:
---------
match_data: data that satisfied match condition
Example:
--------
>>> self.find_values_by_key('Subject', 'Family_ID', '23142_21345')
"""
if data is None:
data = self.rawdata
return data[dest_key][data[source_key] == source_value]
def unique_value_by_key(self, key, data = None):
"""
Get unique value by a key
Parameters:
------------
key: a string. key name of data
        data: optional DataFrame to operate on instead of self.rawdata
Return:
-------
unique_data: unique list data
Examples:
---------
>>> self.unique_value_by_key('Gender')
"""
if data is None:
data = self.rawdata
return np.unique(data[key]).tolist()
def reorder_by_list(self, datalist, key, data=None):
"""
Reorder pandas dataframe by a datalist which correspond to the key.
Parameters:
-----------
datalist: template list for data[key] should be.
key: key name of the data.
        data: optional DataFrame to operate on instead of self.rawdata.
Return:
-------
reorder_data: reordered data
Examples:
---------
>>> reorder_by_list(subjectID, 'Subject', data=origin_pd)
"""
if data is None:
data = self.rawdata
data.reset_index(drop=True,inplace=True)
ord_idx = []
for sid in datalist:
ord_idx.append(data[key].tolist().index(sid))
return data.reindex(ord_idx)
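# Hedged usage sketch (added for illustration; not part of the original
# module).  Exercises OperateCSV on an in-memory DataFrame instead of a
# .csv path; the column names and values below are made up for the example.
def _example_operatecsv_usage():
    df = pd.DataFrame({'Subject': ['100004', '308331', '723141'],
                       'Gender': ['F', 'M', 'F'],
                       'Age': [26, 31, 29]})
    csv = OperateCSV(df)
    keys = csv.getkeys()                                  # ['Subject', 'Gender', 'Age']
    females = csv.get_data_by_row('Gender', ['F'])        # two matching rows
    ages = csv.find_values_by_key('Age', 'Gender', 'M')   # Series([31])
    ordered = csv.reorder_by_list(['723141', '100004', '308331'], 'Subject')
    return keys, females, ages, ordered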
| mit |
pavle-batuta/maxeler_2dpoint_gen | printpoint2d.py | 1 | 2607 | import argparse
import struct
from itertools import izip
try:
import numpy as np
except ImportError:
np = None
try:
import matplotlib.pyplot as plt
except:
plt = None
RESOLUTION_PERCENTAGE = {'low':20, 'medium':50, 'high':80, 'full':100}
COLOR_LIST = ['b','g','r','c','m','y']
def arg_parsing():
description_string = """Input a list of 2d points in the format (x,y,c)
where c is a cluster number, You can then compare the list with another
file or plot the points"""
parser = argparse.ArgumentParser(description=description_string)
parser.add_argument('input', type=argparse.FileType('r'),
help='Input file')
parser.add_argument('-c', '--compare-to', dest='other', default=None,
type=argparse.FileType('r'),
help='File to compare')
parser.add_argument('-p','--plot', action='store_true', dest='do_plot',
help='Plot the input file.')
    parser.add_argument('--resolution', choices=['low', 'medium', 'high', 'full'],
default='full',
help=('Choose resolution for rendering points: '
'low (render 20%% of points), '
'medium (render 50%% of points), '
'high (render 80%% of points) or '
'full (default): render all points.')
)
return parser.parse_args()
def read_file_chunks(file, chunk_size=8*3):
    """Lazy function (generator) for reading a file in fixed-size chunks.
    The default chunk size is 24 bytes (8 * 3): three packed doubles."""
while True:
data = file.read(chunk_size)
if not data:
break
yield data
def compare_files(first, second, more=False):
total_chunks = 0
different_chunks = 0
for chunk1,chunk2 in izip(read_file_chunks(first, chunk_size=8*3),
read_file_chunks(second, chunk_size=8*3)):
if(chunk1 != chunk2):
# TODO: additional information?
different_chunks+=1
total_chunks += 1
return (total_chunks, different_chunks)
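# Hedged usage sketch (added for illustration; not part of the original
# script).  Writes two tiny binary point files -- each record is assumed to
# be three packed doubles (x, y, cluster) -- and diffs them chunk by chunk.
def _example_compare_files():
    import os
    import tempfile
    points_a = [(0.0, 0.0, 0.0), (1.0, 2.0, 1.0)]
    points_b = [(0.0, 0.0, 0.0), (1.0, 2.5, 1.0)]
    names = []
    for pts in (points_a, points_b):
        tmp = tempfile.NamedTemporaryFile(delete=False)
        for x, y, c in pts:
            tmp.write(struct.pack('ddd', x, y, c))
        tmp.close()
        names.append(tmp.name)
    with open(names[0], 'rb') as fa, open(names[1], 'rb') as fb:
        total, different = compare_files(fa, fb)
    for name in names:
        os.remove(name)
    return total, different  # (2, 1): one of the two chunks differs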
def plot_points_colored(points_file):
    # each 24-byte chunk is assumed to pack three doubles: x, y, cluster
    triples = (struct.unpack('ddd', chunk)
               for chunk in read_file_chunks(points_file, chunk_size=8*3))
    xs, ys, cs = izip(*triples)
    colors = [COLOR_LIST[int(c) % len(COLOR_LIST)] for c in cs]
    plt.scatter(xs, ys, c=colors)
    plt.show()
def main():
    args = arg_parsing()
    if (args.other):
        total, different = compare_files(first=args.input, second=args.other)
        print ("%d of %d chunks differ." % (different, total))
    if (args.do_plot):
        if plt:
            args.input.seek(0)  # rewind in case the comparison consumed the file
            plot_points_colored(args.input)
        else:
            print ("Cannot plot, missing matplotlib dependency.")
if __name__ == '__main__':
main()
| mit |
xyguo/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 157 | 2409 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
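# Hedged sketch (added for illustration; this is not the official solution to
# the skeleton above).  One possible way to fill in the TASK steps: a
# TfidfVectorizer + LinearSVC pipeline grid-searched over unigrams vs bigrams.
def _example_pipeline_and_grid(docs_train, y_train, docs_test):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    print(grid_search.best_params_)
    return grid_search.predict(docs_test)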
| bsd-3-clause |
erikvolz/idepi-ev0 | idepi/feature_extraction/__init__.py | 3 | 1810 | #
# idepi :: (IDentify EPItope) python libraries containing some useful machine
# learning interfaces for regression and discrete analysis (including
# cross-validation, grid-search, and maximum-relevance/mRMR feature selection)
# and utilities to help identify neutralizing antibody epitopes via machine
# learning.
#
# Copyright (C) 2011 N Lance Hepler <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from sklearn.pipeline import FeatureUnion as FeatureUnion_
from idepi.feature_extraction._msavectorizer import *
from idepi.feature_extraction._msavectorizerpairwise import *
from idepi.feature_extraction._msavectorizerregex import *
from idepi.feature_extraction._msavectorizerregexpairwise import *
__all__ = ['FeatureUnion']
__all__ += _msavectorizer.__all__
__all__ += _msavectorizerpairwise.__all__
__all__ += _msavectorizerregex.__all__
__all__ += _msavectorizerregexpairwise.__all__
class FeatureUnion(FeatureUnion_):
def get_feature_names(self):
feature_names = []
for _, trans in self.transformer_list:
feature_names.extend(trans.get_feature_names())
return feature_names
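# Hedged usage sketch (added for illustration; not part of the original
# package).  Unlike scikit-learn's stock FeatureUnion, the override above
# concatenates the transformers' own feature names without prefixing them.
# Assumes a scikit-learn version whose transformers still expose
# get_feature_names().
def _example_feature_union_names():
    from sklearn.feature_extraction.text import CountVectorizer
    union = FeatureUnion([
        ('words', CountVectorizer(analyzer='word')),
        ('chars', CountVectorizer(analyzer='char', ngram_range=(2, 2))),
    ])
    union.fit(['epitope mapping', 'antibody epitope'])
    return union.get_feature_names()  # word names followed by char bigrams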
| gpl-3.0 |
MartinDelzant/scikit-learn | sklearn/ensemble/forest.py | 176 | 62555 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Brian Holt <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Private function used by the _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
    """Private function used by the forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
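# Hedged illustration (added for clarity; not part of the scikit-learn
# source).  For one tree's random_state, the bootstrap sample and the
# out-of-bag sample partition the row indices: every row is either drawn
# (possibly more than once) or out-of-bag, never both.
def _example_oob_partition(random_state=0, n_samples=10):
    drawn = _generate_sample_indices(random_state, n_samples)
    oob = _generate_unsampled_indices(random_state, n_samples)
    assert set(drawn) | set(oob) == set(range(n_samples))
    assert not set(drawn) & set(oob)
    return drawn, oob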
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
            valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and use averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
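# Hedged usage sketch (added for illustration; not part of the scikit-learn
# source).  Fits a small bootstrapped forest on synthetic data and reads back
# the out-of-bag estimate and feature importances described above.
def _example_random_forest_classifier():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = RandomForestClassifier(n_estimators=50, oob_score=True,
                                 random_state=0)
    clf.fit(X, y)
    return clf.oob_score_, clf.feature_importances_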
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
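# Hedged usage sketch (added for illustration; not part of the scikit-learn
# source).  The out-of-bag prediction gives a per-row estimate on the
# training data without holding out a separate validation split.
def _example_random_forest_regressor():
    rng = np.random.RandomState(0)
    X = rng.uniform(-3, 3, size=(300, 2))
    y = np.sin(X[:, 0]) + 0.1 * rng.randn(300)
    reg = RandomForestRegressor(n_estimators=100, oob_score=True,
                                random_state=0)
    reg.fit(X, y)
    return reg.oob_score_, reg.oob_prediction_[:5]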
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small, it is possible that a data point was
        never left out during the bootstrap; in that case,
        `oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
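    Examples
    --------
    A minimal usage sketch (illustrative only; the ``make_classification``
    helper and the parameter values below are assumptions, not part of this
    estimator):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import ExtraTreesClassifier
    >>> X, y = make_classification(n_samples=100, n_features=4, random_state=0)
    >>> clf = ExtraTreesClassifier(n_estimators=10, random_state=0)
    >>> clf.fit(X, y).predict(X[:2]).shape
    (2,)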
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
            - If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
            valid partition of the node samples is found, even if it requires
            effectively inspecting more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
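    Examples
    --------
    A minimal usage sketch (illustrative only; the ``make_regression`` helper
    and the parameter values below are assumptions, not part of this
    estimator):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import ExtraTreesRegressor
    >>> X, y = make_regression(n_samples=100, n_features=4, random_state=0)
    >>> reg = ExtraTreesRegressor(n_estimators=10, random_state=0)
    >>> reg.fit(X, y).predict(X[:2]).shape
    (2,)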
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
        discarded if, after the split, one of the leaves would contain fewer than
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix (the default), or to
        return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
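    Examples
    --------
    A minimal usage sketch (illustrative only; the toy input and settings are
    assumptions). Each sample is mapped to a sparse one-hot encoding of the
    leaves it falls into:
    >>> import numpy as np
    >>> from sklearn.ensemble import RandomTreesEmbedding
    >>> X = np.array([[0.], [1.], [2.], [3.]])
    >>> hasher = RandomTreesEmbedding(n_estimators=2, max_depth=2, random_state=0)
    >>> X_transformed = hasher.fit_transform(X)
    >>> X_transformed.shape[0]
    4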
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
        efficiency. Sparse matrices are also supported; use a sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
        # ensure_2d=False because there are unit tests checking that we fail
        # for 1d input.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
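        # Fit the totally random trees against a uniformly random target:
        # with ExtraTreeRegressor and max_features=1 the chosen splits are,
        # by design, essentially independent of any real label.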
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
        efficiency. Sparse matrices are also supported; use a sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
| bsd-3-clause |
elahesadatnaghib/FB-Scheduler-v2 | FBDE.py | 1 | 24163 | __author__ = 'Elahe'
import ephem
import numpy as np
import json
from numpy import *
import sqlite3 as lite
from calculations import *
import pandas as pd
class DataFeed(object):
def __init__(self, date, site, custom_episode = False):
self.Site = site
self.night_id = int(date - ephem.Date('2015/6/28 12:00:00.00')) + 1
# connecting to db
con = lite.connect('FBDE.db')
cur = con.cursor()
# fields data: ID, RA, Dec, Label, N_visit, time of the last visit
cur.execute('SELECT ID, Dec, RA, Label, N_visit, Last_visit, N_visit_u, Last_visit_u, N_visit_g, Last_visit_g, '
'N_visit_r, Last_visit_r, N_visit_i, Last_visit_i, N_visit_z, Last_visit_z, N_visit_y, Last_visit_y FROM FieldsStatistics')
        input1 = pd.DataFrame(cur.fetchall(), columns = ['ID', 'Dec', 'RA', 'Label', 'N_visit', 't_visit',
                                                         'N_visit_u', 't_visit_u', 'N_visit_g', 't_visit_g',
                                                         'N_visit_r', 't_visit_r', 'N_visit_i', 't_visit_i',
                                                         'N_visit_z', 't_visit_z', 'N_visit_y', 't_visit_y'])
self.n_fields = len(input1)
# create fields objects and feed their parameters and data
dtype = [('ID', np.int), ('Dec', np.float), ('RA', np.float), ('Label', np.str), ('N_visit', np.int), ('t_visit', np.float),
('N_visit_u', np.int), ('t_visit_u', np.float), ('N_visit_g', np.int), ('t_visit_g', np.float), ('N_visit_r', np.int),
('t_visit_r', np.float), ('N_visit_i', np.int), ('t_visit_i', np.float), ('N_visit_z', np.int), ('t_visit_z', np.float),
('N_visit_y', np.int), ('t_visit_y', np.float)]
fields_info = np.zeros((self.n_fields,), dtype = dtype)
fields_info['ID'] = input1['ID']
fields_info['Dec'] = input1['Dec']
fields_info['RA'] = input1['RA']
fields_info['Label'] = input1['Label']
fields_info['N_visit'] = input1['N_visit']
fields_info['t_visit'] = input1['t_visit']
fields_info['N_visit_u'] = input1['N_visit_u']
fields_info['t_visit_u'] = input1['t_visit_u']
fields_info['N_visit_g'] = input1['N_visit_g']
fields_info['t_visit_g'] = input1['t_visit_g']
fields_info['N_visit_r'] = input1['N_visit_r']
fields_info['t_visit_r'] = input1['t_visit_r']
fields_info['N_visit_i'] = input1['N_visit_i']
fields_info['t_visit_i'] = input1['t_visit_i']
fields_info['N_visit_z'] = input1['N_visit_z']
fields_info['t_visit_z'] = input1['t_visit_z']
fields_info['N_visit_y'] = input1['N_visit_y']
fields_info['t_visit_y'] = input1['t_visit_y']
Max_N_visit = np.max(input1['N_visit']); Max_N_visit_u = np.max(input1['N_visit_u'])
Max_N_visit_g = np.max(input1['N_visit_g']); Max_N_visit_r = np.max(input1['N_visit_r'])
Max_N_visit_i = np.max(input1['N_visit_i']); Max_N_visit_z = np.max(input1['N_visit_z'])
Max_N_visit_y = np.max(input1['N_visit_y'])
del input1
''' import data for the current night '''
cur.execute('SELECT ephemDate, altitude, hourangle, visible, covered, brightness, moonseparation FROM FieldData where nightid == {}'.format(self.night_id))
input2 = pd.DataFrame(cur.fetchall(), columns=['ephemDate', 'alts','hourangs', 'visible', 'covered', 'brightness', 'moonsep'])
self.n_t_slots = (np.shape(input2)[0]) / self.n_fields
all_fields_all_moments = np.zeros((self.n_fields,self.n_t_slots,), dtype = [('alts', np.float),
('hourangs', np.float),
('visible', np.bool),
('covered', np.bool),
('brightness', np.float),
('moonsep', np.float)])
self.time_slots = np.zeros(self.n_t_slots)
self.time_slots = input2['ephemDate'][0:self.n_t_slots]
for i in range(self.n_fields):
all_fields_all_moments[i, :]['alts'] = input2['alts'][i * self.n_t_slots : (i+1) * self.n_t_slots]
all_fields_all_moments[i, :]['hourangs'] = input2['hourangs'][i * self.n_t_slots : (i+1) * self.n_t_slots]
all_fields_all_moments[i, :]['visible'] = input2['visible'][i * self.n_t_slots : (i+1) * self.n_t_slots]
all_fields_all_moments[i, :]['covered'] = input2['covered'][i * self.n_t_slots : (i+1) * self.n_t_slots] #TODO covered and brighntess should be updatable
all_fields_all_moments[i, :]['brightness'] = input2['brightness'][i * self.n_t_slots : (i+1) * self.n_t_slots]
all_fields_all_moments[i, :]['moonsep'] = input2['moonsep'][i * self.n_t_slots : (i+1) * self.n_t_slots]
del input2
# adjusting t start and t end to where the data exist
self.t_start = self.time_slots[0]; self.t_end = self.time_slots[self.n_t_slots -1]
# n_fields by n_fields symmetric matrix, slew time from field i to j
slew_t = np.loadtxt("NightDataInLIS/Constants/slewMatrix.dat") * ephem.second
        ''' Model parameters and data '''
        # model parameters
cur.execute('SELECT Value FROM ModelParam')
input3 = pd.DataFrame(cur.fetchall(), columns= ['ModelParam'])
self.inf = input3['ModelParam'][0]
self.eps = input3['ModelParam'][1]
self.t_expo = input3['ModelParam'][2]
visit_w1 = input3['ModelParam'][3]; visit_w2 = input3['ModelParam'][4]
self.visit_w = [visit_w1, visit_w2]
self.max_n_night = input3['ModelParam'][5]
self.t_interval = input3['ModelParam'][6]
''' Night variables'''
Night_var = np.array([Max_N_visit, Max_N_visit_u, Max_N_visit_g,
Max_N_visit_r, Max_N_visit_i, Max_N_visit_z, Max_N_visit_y],
dtype = [('Max_n_visit', np.int), ('Max_n_visit_u', np.int),
('Max_n_visit_g', np.int), ('Max_n_visit_r', np.int),
('Max_n_visit_i', np.int), ('Max_n_visit_z', np.int), ('Max_n_visit_y', np.int)])
# create fields
self.fields = []
for index, info in enumerate(fields_info):
temp = FiledState(info, self.t_start, self.time_slots, all_fields_all_moments[index,:], slew_t[index,:],input3, Night_var)
self.fields.append(temp)
del all_fields_all_moments
del slew_t
''' Filter variables'''
cur.execute('SELECT * FROM FilterStatistics')
input4 = cur.fetchall()
# create filters
f_names = ['u', 'g', 'r', 'i', 'z', 'y']
self.filters = []
for info, f_name in zip(input4,f_names):
temp = FilterState(f_name, info, input3)
self.filters.append((temp))
# create episode
self.episode = EpisodeStatus(self.t_start, self.t_end, self.time_slots, self.t_expo)
del input3
con.close()
########################################################################################################################
########################################################################################################################
class Scheduler(DataFeed):
def __init__(self, date, site, f_weight):
super(Scheduler, self).__init__(date, site)
# scheduler parameters
self.f_weight = f_weight
# scheduler decisions
self.next_field = None
self.next_filter = None
self.filter_change= None
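    # schedule() below runs a greedy per-visit loop: it evaluates the
    # feasibility of every filter and every field, computes a cost for each
    # (field, filter) pair from the basis functions and the feature weights,
    # lets decision_maker() (imported from calculations) pick the winning
    # pair, then advances the clock by the slew and exposure times (plus a
    # 2-minute allowance when the filter changes) and records the visit.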
def schedule(self):
self.episode.init_episode(self.fields, self.filters) # Initialize scheduling
self.episode.field.update_visit_var(self.t_start, self.episode.filter.name)
self.reset_output()
while self.episode.t < self.episode.t_end:
all_costs = np.zeros((self.n_fields,), dtype = [('u', np.float),('g', np.float),('r', np.float),('i', np.float),('z', np.float),('y', np.float)])
for f in self.filters:
f.eval_feasibility(self.episode.filter)
for index, field in enumerate(self.fields):
field.eval_feasibility()
all_costs[index] = field.eval_cost(self.f_weight, self.episode.filter.name, self.filters)
winner_indx, winner_cost, winner_filter_index = decision_maker(all_costs)
# decisions made for this visit
self.next_field = self.fields[winner_indx]
self.next_filter = self.filters[winner_filter_index]
self.filter_change = (self.episode.filter != self.next_filter)
# evaluate time of the visit
t_visit = eval_t_visit(self.episode.t, self.next_field.slew_t_to, self.filter_change, 2 * ephem.minute)
# update visit variables of the next field
self.next_field.update_visit_var(t_visit, self.next_filter.name)
# update visit variables of the next filter
self.next_filter.update_visit_var(t_visit, self.episode.step)
# record visit
self.record_visit()
'''prepare for the next visit'''
# update the episode status
dt = eval_dt(self.next_field.slew_t_to, self.t_expo, self.filter_change, 2 * ephem.minute)
self.episode.update_episode_var(dt, self.next_field, self.next_filter)
# update all fields
self.episode.set_fields(self.fields, self.next_field)
# update all filters
self.episode.set_filter(self.filters, self.next_filter)
self.wrap_up()
def reset_output(self):
self.output_dtype = format_output()
self.NightOutput = np.zeros((0,), dtype = self.output_dtype)
try:
os.remove("Output/log{}.lis".format(self.night_id))
except:
pass
self.op_log = open("Output/log{}.lis".format(self.night_id),"w")
#record the first entry
entry1 = record_assistant(self.episode.field, self.episode.t, self.episode.filter.name, self.output_dtype, first_entry=True)
self.NightOutput = np.append(self.NightOutput, entry1)
self.op_log.write(json.dumps(entry1.tolist())+"\n")
def record_visit(self):
entry = record_assistant(self.next_field, self.episode.t, self.episode.filter.name, self.output_dtype)
self.NightOutput = np.append(self.NightOutput, entry)
self.op_log.write(json.dumps(entry.tolist())+"\n")
def wrap_up(self):
np.save("Output/Schedule{}.npy".format(self.night_id), self.NightOutput)
def set_f_wight(self, new_f_weight):
self.f_weight = new_f_weight
def eval_performance(self, preferences):
return eval_performance(self.NightOutput, preferences)
########################################################################################################################
########################################################################################################################
class EpisodeStatus(object):
def __init__(self, t_start, t_end, time_slots, exposure_t):
# parameters constant during the current episode
self.t_start = t_start
self.t_end = t_end
self.episode_len= t_end - t_start # in days
self.time_slots = time_slots
self.n_t_slots = len(time_slots)
self.exposure_t = exposure_t
self.moon_phase = (t_start - ephem.previous_new_moon(t_start))/30 # need to be changed
# variables change after each decision
self.t = None # current time
self.n = None # current time slot
self.step = None # current decision number
self.epi_prog = None # Episode progress
self.field = None # current field
self.filter = None # current filter
self.filter_seq = None # sequence of filters used
self.f_change_flag = None
def init_episode(self, fields, filters):
self.clock(0, reset = True)
self.set_filter(filters, self.filter, initialization = True)
self.set_fields(fields, self.field, initialization = True)
def update_episode_var(self, dt, field, filter):
self.clock(dt)
self.field = field
self.filter = filter
self.filter_dynamic()
def clock(self, dt, reset = False): # sets or resets t, n, step
if reset:
self.t = self.t_start + self.exposure_t
self.step = 0
else:
self.t += dt
self.step += 1
self.find_n()
def find_n(self):
n = 0
if self.t <= self.t_start:
self.n = 0
elif self.t >= self.t_end:
self.n = self.n_t_slots -1
else:
while self.t > self.time_slots[n]:
n += 1
self.n = n
def set_fields(self, fields, current_field, initialization = False):
if initialization:
self.field = eval_init_state(fields, 0)
current_field = self.field
#finding the index of current field
index = fields.index(current_field)
for field in fields:
field.update_field(self.n, self.t, index, initialization)
def set_filter(self, filters, current_filter, initialization = False):
if initialization:
self.filter = eval_init_filter(filters)
self.f_change_flag = False
self.filter_seq = []
for f in filters:
f.update_filter(self.t, current_filter, initialization)
def filter_dynamic(self):
try:
if self.filter_seq[-1] != self.filter.name:
self.filter_seq.append(self.filter.name)
self.f_change_flag = True
else:
self.f_change_flag = False
except:
self.f_change_flag = False
########################################################################################################################
########################################################################################################################
class FiledState(object): # an object of this class stores the information and status of a single field
def __init__(self, field_info, t_start, time_slots, all_moments_data, all_slew_to ,model_param, Night_var):
# parameters (constant during the current episode)
# by input data
self.id = field_info['ID']
self.dec = field_info['Dec']
self.ra = field_info['RA']
self.label = field_info['Label']
self.filter_dtype_count = [('all', np.int),('u', np.int),('g', np.int),('r', np.int),('i', np.int),('z', np.int),('y', np.int)]
self.filter_dtype_value = [('all', np.float),('u', np.float),('g', np.float),('r', np.float),('i', np.float),('z', np.float),('y', np.float)]
self.N_visit = np.zeros(1, self.filter_dtype_count)
self.t_visit = np.zeros(1, self.filter_dtype_value)
self.Max_N_visit= np.zeros(1, self.filter_dtype_count)
self.N_visit['all']= field_info['N_visit'] # before the current episode of the scheduling
self.t_visit['all']= field_info['t_visit'] # before the current episode of the scheduling
self.N_visit['u'] = field_info['N_visit_u']
self.t_visit['u'] = field_info['t_visit_u']
self.N_visit['g'] = field_info['N_visit_g']
self.t_visit['g'] = field_info['t_visit_g']
self.N_visit['r'] = field_info['N_visit_r']
self.t_visit['r'] = field_info['t_visit_r']
self.N_visit['i'] = field_info['N_visit_i']
self.t_visit['i'] = field_info['t_visit_i']
self.N_visit['z'] = field_info['N_visit_z']
self.t_visit['z'] = field_info['t_visit_z']
self.N_visit['y'] = field_info['N_visit_y']
self.t_visit['y'] = field_info['t_visit_y']
self.Max_N_visit['all'] = Night_var[0]['Max_n_visit']
self.Max_N_visit['u'] = Night_var[0]['Max_n_visit_u']
self.Max_N_visit['g'] = Night_var[0]['Max_n_visit_g']
self.Max_N_visit['r'] = Night_var[0]['Max_n_visit_r']
self.Max_N_visit['i'] = Night_var[0]['Max_n_visit_i']
self.Max_N_visit['z'] = Night_var[0]['Max_n_visit_z']
self.Max_N_visit['y'] = Night_var[0]['Max_n_visit_y']
#self.year_vis = # visibility of the year to be added
# by calculation
self.time_slots = time_slots
self.since_t_visit = None
self.t_setting = None
        # parameters that might need to be updated as the predictions are updated
#self.night_vis= # prediction for the visibility of the night to be added (with cloud model)
        # variables (get updated after each time step for all fields)
# from input data
self.slew_t_to = None
self.alt = None
self.ha = None
self.visible = None
self.brightness = None
self.covered = None
self.moonsep = None
# by calculation
self.since_t_last_visit = None
self.t_to_invis = None
self.feasible = None
self.cost = None
        # visit variables (get updated only after a visit of this specific field)
# from input data
self.n_ton_visits = None # total number of visits in the current episode
self.t_last_visit = None # time of the last visit in the current episode
# data of the field for all moments of the current episode
self.all_moments_data = None
self.all_slew_to = None
#Basis functions
self.F = None
self.data_feed(all_moments_data, all_slew_to, model_param)
self.cal_param(t_start, time_slots)
def update_field(self, n, t, current_state_index, initialization = False):
self.slew_t_to = self.all_slew_to[current_state_index]
self.alt = self.all_moments_data[n]['alts']
self.ha = self.all_moments_data[n]['hourangs']
self.visible = self.all_moments_data[n]['visible']
self.brightness= self.all_moments_data[n]['brightness']
self.covered = self.all_moments_data[n]['covered']
self.moonsep = self.all_moments_data[n]['moonsep']
if initialization :
self.n_ton_visits = np.zeros(1, self.filter_dtype_count)
self.t_last_visit = np.zeros(1,self.filter_dtype_value)
for index in range(7):
self.t_last_visit[0][index] = -self.inf
# must be executed after all the variables are updated
self.cal_variable(t)
def set_param(self, night_visibility):
self.night_vis = night_visibility
def set_variable(self, slew_t_to, alt, ha, bri, cov, msep, t):
self.slew_t_to = slew_t_to
self.alt = alt
self.ha = ha
self.brightness= bri
self.covered = cov
self.moonsep = msep
self.cal_variable(t)
def update_visit_var(self, t_new_visit, filter_name):
self.n_ton_visits[0]['all'] += 1
self.t_last_visit[0]['all'] = t_new_visit
self.n_ton_visits[0][filter_name] += 1
self.t_last_visit[0][filter_name] = t_new_visit
def cal_param(self, t_start, time_slots):
self.since_t_visit = np.zeros(1,self.filter_dtype_value)
for index in range(7):
if self.t_visit[0][index] == -self.inf:
self.since_t_visit[0][index] = self.inf
else:
self.since_t_visit[0][index] = t_start - self.t_visit[0][index]
r = np.where(self.all_moments_data['visible'])
if (r[0].size):
index = r[0][-1]
self.t_setting = self.time_slots[index]
else:
self.t_setting = -self.inf
def cal_variable(self, t):
self.since_t_last_visit = np.zeros(1, self.filter_dtype_value)
for index in self.since_t_last_visit.dtype.names:
if self.t_last_visit[0][index] == -self.inf:
self.since_t_last_visit[0][index] = self.inf
else:
self.since_t_last_visit[0][index] = t - self.t_last_visit[0][index]
if self.t_setting == -self.inf:
self.t_to_invis = -self.inf
else:
self.t_to_invis = self.t_setting - t
if self.t_to_invis < self.t_interval /2:
self.t_to_invis = 0
def data_feed(self, all_moments_data, all_slew_to, model_param):
self.all_moments_data = all_moments_data
self.all_slew_to = all_slew_to
self.inf = model_param['ModelParam'][0]
self.eps = model_param['ModelParam'][1]
self.t_expo = model_param['ModelParam'][2]
visit_w1 = model_param['ModelParam'][3]; visit_w2 = model_param['ModelParam'][4]
self.visit_w = [visit_w1, visit_w2]
self.max_n_night = model_param['ModelParam'][5]
self.t_interval = model_param['ModelParam'][6]
def eval_feasibility(self):
self.feasible = eval_feasibility(self)
return self.feasible
def eval_cost(self, f_weight, curr_filter_name, filters):
if not self.feasible:
self.F = None
return self.inf * np.ones((6,))
self.F = eval_basis_fcn(self, curr_filter_name, filters)
self.cost = eval_cost(self.F, f_weight, filters)
return self.cost
class FilterState(object):
def __init__(self, f_name, info, model_param):
# filter parameters (constant during the current episode)
self.name = f_name
self.N_visit_in = info[6]
# filter variables (update after each observation step)
# by calculation
self.t_since_last_visit_in = None
self.feasible = None
# filter visit variables (update after each visit in this specific filter)
self.t_last_visit_in = None
self.n_visit_in = None
self.n_current_batch = None
self.visit_seq = None
self.n_changed_to = None
# model parameter
self.f_change_t = None
self.inf = model_param['ModelParam'][0]
self.eps = model_param['ModelParam'][1]
def update_filter(self, t, current_filter, initialization):
if initialization:
self.n_visit_in = 0
self.n_current_batch = 0
self.t_last_visit_in = -self.inf
self.visit_seq = []
self.n_changed_to = 0
elif current_filter.name != self.name:
self.n_current_batch = 0
self.cal_variable(t)
def update_visit_var(self, t_new_visit, step):
self.t_last_visit_in = t_new_visit
self.n_visit_in += 1
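        # A "batch" is a run of consecutive visits taken in this filter: if
        # the previous visit in this filter happened at the immediately
        # preceding step the current batch is extended, otherwise a new batch
        # is started and one more change into this filter is counted.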
try:
if self.visit_seq[-1] == step -1:
self.n_current_batch += 1
else:
self.n_current_batch = 1
self.n_changed_to += 1
except:
self.n_current_batch = 1
self.n_changed_to += 1
self.visit_seq.append(step)
def set_variable(self, t):
self.cal_variable(t)
def cal_variable(self, t):
if self.t_last_visit_in == -self.inf:
self.t_since_last_visit_in = self.inf
else:
self.t_since_last_visit_in = t - self.t_last_visit_in
def eval_feasibility(self, current_filter):
self.feasible = eval_feasibility_filter(self, current_filter)
| mit |
themrmax/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 84 | 7866 | # Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_gmm.py'
import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.utils.testing import assert_warns_message, ignore_warnings
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.mixture.dpgmm import digamma, gammaln
from sklearn.mixture.dpgmm import wishart_log_det, wishart_logz
np.seterr(all='warn')
@ignore_warnings(category=DeprecationWarning)
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
@ignore_warnings(category=DeprecationWarning)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
@ignore_warnings(category=DeprecationWarning)
def test_digamma():
assert_warns_message(DeprecationWarning, "The function digamma is"
" deprecated in 0.18 and will be removed in 0.20. "
"Use scipy.special.digamma instead.", digamma, 3)
@ignore_warnings(category=DeprecationWarning)
def test_gammaln():
assert_warns_message(DeprecationWarning, "The function gammaln"
" is deprecated in 0.18 and will be removed"
" in 0.20. Use scipy.special.gammaln instead.",
gammaln, 3)
@ignore_warnings(category=DeprecationWarning)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
result = assert_warns_message(DeprecationWarning, "The function "
"log_normalize is deprecated in 0.18 and"
" will be removed in 0.20.",
log_normalize, a)
assert np.allclose(v, result, rtol=0.01)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_log_det():
a = np.array([0.1, 0.8, 0.01, 0.09])
b = np.array([0.2, 0.7, 0.05, 0.1])
assert_warns_message(DeprecationWarning, "The function "
"wishart_log_det is deprecated in 0.18 and"
" will be removed in 0.20.",
wishart_log_det, a, b, 2, 4)
@ignore_warnings(category=DeprecationWarning)
def test_wishart_logz():
assert_warns_message(DeprecationWarning, "The function "
"wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.", wishart_logz,
3, np.identity(3), 1, 3)
@ignore_warnings(category=DeprecationWarning)
def test_DPGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `DPGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be removed in 0.20.",
DPGMM)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_VBGMM_deprecation():
assert_warns_message(
DeprecationWarning, "The `VBGMM` class is not working correctly and "
"it's better to use `sklearn.mixture.BayesianGaussianMixture` class "
"with parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. VBGMM is deprecated "
"in 0.18 and will be removed in 0.20.", VBGMM)
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
def test_vbgmm_no_modify_alpha():
alpha = 2.
n_components = 3
X, y = make_blobs(random_state=1)
vbgmm = VBGMM(n_components=n_components, alpha=alpha, n_iter=1)
assert_equal(vbgmm.alpha, alpha)
assert_equal(vbgmm.fit(X).alpha_, float(alpha) / n_components)
| bsd-3-clause |
Miegl/PiFmAdv | src/generate_waveforms.py | 1 | 1709 | #!/usr/bin/python
# PiFmAdv - Advanced featured FM transmitter for the Raspberry Pi
# Copyright (C) 2017 Miegl
#
# See https://github.com/Miegl/PiFmAdv
# This program generates the waveform of a single biphase symbol
#
# This program uses Pydemod, see https://github.com/ChristopheJacquet/Pydemod
import pydemod.app.rds as rds
import numpy
import scipy.io.wavfile as wavfile
import io
import matplotlib.pyplot as plt
sample_rate = 228000
outc = io.open("waveforms.c", mode="w", encoding="utf8")
outh = io.open("waveforms.h", mode="w", encoding="utf8")
header = u"""
/* This file was automatically generated by "generate_waveforms.py".
(C) 2014 Christophe Jacquet.
Released under the GNU GPL v3 license.
*/
"""
outc.write(header)
outh.write(header)
def generate_bit(name):
offset = 240
l = 96
count = 2
sample = numpy.zeros(3*l)
sample[l] = 1
sample[2*l] = -1
# Apply the data-shaping filter
sf = rds.pulse_shaping_filter(96*8, 228000)
shapedSamples = numpy.convolve(sample, sf)
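    # Keep 576 samples (6 bit-periods of 96 samples) around index 528 of the
    # filtered signal, the region centred between the two shaped impulses,
    # as the stored biphase symbol waveform.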
out = shapedSamples[528-288:528+288] #[offset:offset+l*count]
#plt.plot(sf)
#plt.plot(out)
#plt.show()
iout = (out * 20000./max(abs(out)) ).astype(numpy.dtype('>i2'))
wavfile.write(u"waveform_{}.wav".format(name), sample_rate, iout)
outc.write(u"float waveform_{name}[] = {{{values}}};\n\n".format(
name = name,
values = u", ".join(map(unicode, out/2.5))))
# note: need to limit the amplitude so as not to saturate when the biphase
# waveforms are summed
outh.write(u"extern float waveform_{name}[{size}];\n".format(name=name, size=len(out)))
generate_bit("biphase")
outc.close()
outh.close()
| gpl-3.0 |
StingraySoftware/stingray | stingray/modeling/tests/test_parameterestimation.py | 1 | 35403 |
import numpy as np
import scipy.stats
import os
import logging
from astropy.tests.helper import pytest, catch_warnings
from astropy.modeling import models
from astropy.modeling.fitting import _fitter_to_model_params
from stingray import Powerspectrum
from stingray.modeling import ParameterEstimation, PSDParEst, \
OptimizationResults, SamplingResults
from stingray.modeling import PSDPosterior, set_logprior, PSDLogLikelihood, \
LogLikelihood
try:
from statsmodels.tools.numdiff import approx_hess
comp_hessian = True
except ImportError:
comp_hessian = False
try:
import emcee
can_sample = True
except ImportError:
can_sample = False
try:
import matplotlib.pyplot as plt
can_plot = True
except ImportError:
can_plot = False
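# A deliberately broken likelihood whose evaluate() always returns NaN; it is
# used below to exercise the code path where fitting gives up after too many
# tries.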
class LogLikelihoodDummy(LogLikelihood):
def __init__(self, x, y, model):
LogLikelihood.__init__(self, x, y, model)
def evaluate(self, parse, neg=False):
return np.nan
class OptimizationResultsSubclassDummy(OptimizationResults):
def __init__(self, lpost, res, neg, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
self.neg = neg
if res is not None:
self.result = res.fun
self.p_opt = res.x
else:
self.result = None
self.p_opt = None
self.model = lpost.model
class TestParameterEstimation(object):
@classmethod
def setup_class(cls):
np.random.seed(100)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
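        # The synthetic spectrum above is pure white noise at the Leahy level:
        # exponentially distributed powers with mean 2, so the Const1D model
        # defined below should recover an amplitude close to 2.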
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power, cls.model,
m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
def test_par_est_initializes(self):
pe = ParameterEstimation()
def test_parest_stores_max_post_correctly(self):
"""
Make sure the keyword for Maximum A Posteriori fits is stored correctly
as a default.
"""
pe = ParameterEstimation()
assert pe.max_post is True, "max_post should be set to True as a default."
def test_object_works_with_loglikelihood_object(self):
llike = PSDLogLikelihood(self.ps.freq, self.ps.power,
self.model, m=self.ps.m)
pe = ParameterEstimation()
res = pe.fit(llike, [2.0])
assert isinstance(res,
OptimizationResults), "res must be of " \
"type OptimizationResults"
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = ParameterEstimation()
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = ParameterEstimation()
t0 = [1, 2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
def test_fit_method_works_with_correct_parameter(self):
pe = ParameterEstimation()
t0 = [2.0]
res = pe.fit(self.lpost, t0)
def test_fit_method_fails_with_too_many_tries(self):
lpost = LogLikelihoodDummy(self.ps.freq, self.ps.power, self.model)
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(Exception):
res = pe.fit(lpost, t0, neg=True)
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = ParameterEstimation()
t0 = [2.0]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_sets_max_post_to_false(self):
t0 = [2.0]
pe = ParameterEstimation(max_post=True)
assert pe.max_post is True
delta_deviance, opt1, opt2 = pe.compute_lrt(self.lpost, t0,
self.lpost, t0)
assert pe.max_post is False
assert delta_deviance < 1e-7
@pytest.mark.skipif("not can_sample", "not can_plot")
def test_sampler_runs(self):
pe = ParameterEstimation()
if os.path.exists("test_corner.pdf"):
os.unlink("test_corner.pdf")
with catch_warnings(RuntimeWarning):
sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
burnin=50, print_results=True, plot=True)
assert os.path.exists("test_corner.pdf")
assert sample_res.acceptance > 0.25
assert isinstance(sample_res, SamplingResults)
# TODO: Fix pooling with the current setup of logprior
# @pytest.mark.skipif("not can_sample", "not can_plot")
# def test_sampler_pooling(self):
# pe = ParameterEstimation()
# if os.path.exists("test_corner.pdf"):
# os.unlink("test_corner.pdf")
# with catch_warnings(RuntimeWarning):
# sample_res = pe.sample(self.lpost, [2.0], nwalkers=50, niter=10,
# burnin=50, print_results=True, plot=True,
# pool=True)
@pytest.mark.skipif("can_sample")
def test_sample_raises_error_without_emcee(self):
pe = ParameterEstimation()
with pytest.raises(ImportError):
sample_res = pe.sample(self.lpost, [2.0])
def test_simulate_lrt_fails_in_superclass(self):
pe = ParameterEstimation()
with pytest.raises(NotImplementedError):
pe.simulate_lrts(None, None, None, None, None)
class TestOptimizationResults(object):
@classmethod
def setup_class(cls):
np.random.seed(1000)
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.n = freq.shape[0]
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = np.array([2.0])
cls.neg = True
cls.opt = scipy.optimize.minimize(cls.lpost, cls.t0,
method=cls.fitmethod,
args=cls.neg, tol=1.e-10)
cls.opt.x = np.atleast_1d(cls.opt.x)
cls.optres = OptimizationResultsSubclassDummy(cls.lpost,
cls.opt,
neg=True)
def test_object_initializes_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
assert hasattr(res, "p_opt")
assert hasattr(res, "result")
assert hasattr(res, "deviance")
assert hasattr(res, "aic")
assert hasattr(res, "bic")
assert hasattr(res, "model")
assert isinstance(res.model, models.Const1D)
assert res.p_opt == self.opt.x, "res.p_opt must be the same as opt.x!"
assert np.isclose(res.p_opt[0], 2.0, atol=0.1, rtol=0.1)
assert res.model == self.lpost.model
assert res.result == self.opt.fun
mean_model = np.ones_like(self.lpost.x) * self.opt.x[0]
assert np.allclose(res.mfit, mean_model), "res.model should be exactly " \
"the model for the data."
def test_compute_criteria_works_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg = self.neg)
test_aic = res.result+ 2.0*res.p_opt.shape[0]
test_bic = res.result + res.p_opt.shape[0] * \
np.log(self.lpost.x.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(res.p_opt,
neg=False)
assert np.isclose(res.aic, test_aic, atol=0.1, rtol=0.1)
assert np.isclose(res.bic, test_bic, atol=0.1, rtol=0.1)
assert np.isclose(res.deviance, test_deviance, atol=0.1, rtol=0.1)
def test_merit_calculated_correctly(self):
res = OptimizationResults(self.lpost, self.opt, neg=self.neg)
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
assert np.isclose(res.merit, test_merit, rtol=0.2)
def test_compute_statistics_computes_mfit(self):
assert hasattr(self.optres, "mfit") is False
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "mfit")
def test_compute_model(self):
self.optres._compute_model(self.lpost)
assert hasattr(self.optres,
"mfit"), "OptimizationResult object should have mfit " \
"attribute at this point!"
_fitter_to_model_params(self.model, self.opt.x)
mfit_test = self.model(self.lpost.x)
assert np.allclose(self.optres.mfit, mfit_test)
def test_compute_statistics_computes_all_statistics(self):
self.optres._compute_statistics(self.lpost)
assert hasattr(self.optres, "merit")
assert hasattr(self.optres, "dof")
assert hasattr(self.optres, "sexp")
assert hasattr(self.optres, "ssd")
assert hasattr(self.optres, "sobs")
test_merit = np.sum(((self.ps.power - 2.0)/2.0)**2.)
test_dof = self.ps.n - self.lpost.npar
test_sexp = 2.0 * self.lpost.x.shape[0] * len(self.optres.p_opt)
test_ssd = np.sqrt(2.0*test_sexp)
test_sobs = np.sum(self.ps.power - self.optres.p_opt[0])
assert np.isclose(test_merit, self.optres.merit, rtol=0.2)
assert test_dof == self.optres.dof
assert test_sexp == self.optres.sexp
assert test_ssd == self.optres.ssd
assert np.isclose(test_sobs, self.optres.sobs, atol=0.01, rtol=0.01)
def test_compute_criteria_returns_correct_attributes(self):
self.optres._compute_criteria(self.lpost)
assert hasattr(self.optres, "aic")
assert hasattr(self.optres, "bic")
assert hasattr(self.optres, "deviance")
npar = self.optres.p_opt.shape[0]
test_aic = self.optres.result + 2. * npar
test_bic = self.optres.result + npar * np.log(self.ps.freq.shape[0])
test_deviance = -2 * self.lpost.loglikelihood(self.optres.p_opt,
neg=False)
assert np.isclose(test_aic, self.optres.aic)
assert np.isclose(test_bic, self.optres.bic)
assert np.isclose(test_deviance, self.optres.deviance)
def test_compute_covariance_with_hess_inverse(self):
self.optres._compute_covariance(self.lpost, self.opt)
assert np.allclose(self.optres.cov, np.asarray(self.opt.hess_inv))
assert np.allclose(self.optres.err, np.sqrt(np.diag(self.opt.hess_inv)))
@pytest.mark.skipif("comp_hessian")
def test_compute_covariance_without_comp_hessian(self):
self.optres._compute_covariance(self.lpost, None)
assert self.optres.cov is None
assert self.optres.err is None
@pytest.mark.skipif("not comp_hessian")
def test_compute_covariance_with_hess_inverse(self):
optres = OptimizationResultsSubclassDummy(self.lpost, self.opt,
neg=True)
optres._compute_covariance(self.lpost, self.opt)
if comp_hessian:
phess = approx_hess(self.opt.x, self.lpost)
hess_inv = np.linalg.inv(phess)
assert np.allclose(optres.cov, hess_inv)
assert np.allclose(optres.err, np.sqrt(np.diag(np.abs(hess_inv))))
def test_print_summary_works(self, logger, caplog):
self.optres._compute_covariance(self.lpost, None)
self.optres.print_summary(self.lpost)
assert 'Parameter amplitude' in caplog.text
assert "Fitting statistics" in caplog.text
assert "number of data points" in caplog.text
assert "Deviance [-2 log L] D =" in caplog.text
assert "The Akaike Information Criterion of " \
"the model is" in caplog.text
assert "The Bayesian Information Criterion of " \
"the model is" in caplog.text
assert "The figure-of-merit function for this model" in caplog.text
assert "Summed Residuals S =" in caplog.text
assert "Expected S" in caplog.text
assert "merit function" in caplog.text
if can_sample:
class SamplingResultsDummy(SamplingResults):
def __init__(self, sampler, ci_min=0.05, ci_max=0.95, log=None):
if log is None:
self.log = logging.getLogger('Fitting summary')
self.log.setLevel(logging.DEBUG)
if not self.log.handlers:
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
self.log.addHandler(ch)
# store all the samples
self.samples = sampler.get_chain(flat=True)
chain_ndims = sampler.get_chain().shape
self.nwalkers = float(chain_ndims[0])
self.niter = float(chain_ndims[1])
# store number of dimensions
self.ndim = chain_ndims[2]
# compute and store acceptance fraction
self.acceptance = np.nanmean(sampler.acceptance_fraction)
self.L = self.acceptance * self.samples.shape[0]
class TestSamplingResults(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.arange(nfreq)
noise = np.random.exponential(size=nfreq)
power = noise * 2.0
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
cls.ps = ps
cls.a_mean, cls.a_var = 2.0, 1.0
cls.model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(
amplitude)
cls.priors = {"amplitude": p_amplitude}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "BFGS"
cls.max_post = True
cls.t0 = [2.0]
cls.neg = True
pe = ParameterEstimation()
res = pe.fit(cls.lpost, cls.t0)
cls.nwalkers = 50
cls.niter = 100
np.random.seed(200)
p0 = np.array(
[np.random.multivariate_normal(res.p_opt, res.cov) for
i in range(cls.nwalkers)])
cls.sampler = emcee.EnsembleSampler(cls.nwalkers,
len(res.p_opt), cls.lpost,
args=[False])
with catch_warnings(RuntimeWarning):
_, _, _ = cls.sampler.run_mcmc(p0, cls.niter)
def test_can_sample_is_true(self):
assert can_sample
def test_sample_results_object_initializes(self):
s = SamplingResults(self.sampler)
assert s.samples.shape[0] == self.nwalkers * self.niter
assert s.acceptance > 0.25
assert np.isclose(s.L,
s.acceptance * self.nwalkers * self.niter)
def test_check_convergence_works(self):
s = SamplingResultsDummy(self.sampler)
s._check_convergence(self.sampler)
assert hasattr(s, "rhat")
rhat_test = 0.038688
assert np.isclose(rhat_test, s.rhat[0], atol=0.02, rtol=0.1)
s._infer()
assert hasattr(s, "mean")
assert hasattr(s, "std")
assert hasattr(s, "ci")
test_mean = 2.0
test_std = 0.2
assert np.isclose(test_mean, s.mean[0], rtol=0.1)
assert np.isclose(test_std, s.std[0], atol=0.01, rtol=0.01)
assert s.ci.size == 2
def test_infer_computes_correct_values(self):
s = SamplingResults(self.sampler)
@pytest.fixture()
def logger():
logger = logging.getLogger('Some.Logger')
logger.setLevel(logging.INFO)
return logger
class TestPSDParEst(object):
@classmethod
def setup_class(cls):
m = 1
nfreq = 100
freq = np.linspace(1, 10.0, nfreq)
rng = np.random.RandomState(100) # set the seed for the random number generator
noise = rng.exponential(size=nfreq)
cls.model = models.Lorentz1D() + models.Const1D()
cls.x_0_0 = 2.0
cls.fwhm_0 = 0.05
cls.amplitude_0 = 1000.0
cls.amplitude_1 = 2.0
cls.model.x_0_0 = cls.x_0_0
cls.model.fwhm_0 = cls.fwhm_0
cls.model.amplitude_0 = cls.amplitude_0
cls.model.amplitude_1 = cls.amplitude_1
p = cls.model(freq)
np.random.seed(400)
power = noise*p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1]-freq[0]
ps.norm = "leahy"
cls.ps = ps
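        # The synthetic spectrum here is a narrow Lorentzian (a QPO-like
        # feature at x_0 = 2 with FWHM 0.05 and amplitude 1000) on top of a
        # flat continuum of 2, multiplied by unit-mean exponential noise.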
cls.a_mean, cls.a_var = 2.0, 1.0
cls.a2_mean, cls.a2_var = 100.0, 10.0
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=cls.a_mean, scale=cls.a_var).pdf(amplitude)
p_x_0_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_fwhm_0 = lambda alpha: \
scipy.stats.uniform(0.0, 0.5).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=cls.a2_mean, scale=cls.a2_var).pdf(amplitude)
cls.priors = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"x_0_0": p_x_0_0,
"fwhm_0": p_fwhm_0}
cls.lpost = PSDPosterior(cls.ps.freq, cls.ps.power,
cls.model, m=cls.ps.m)
cls.lpost.logprior = set_logprior(cls.lpost, cls.priors)
cls.fitmethod = "powell"
cls.max_post = True
cls.t0 = [cls.x_0_0, cls.fwhm_0, cls.amplitude_0, cls.amplitude_1]
cls.neg = True
def test_fitting_with_ties_and_bounds(self, capsys):
double_f = lambda model : model.x_0_0 * 2
model = self.model.copy()
model += models.Lorentz1D(amplitude=model.amplitude_0,
x_0 = model.x_0_0 * 2,
fwhm = model.fwhm_0)
model.x_0_0 = self.model.x_0_0
model.amplitude_0 = self.model.amplitude_0
model.amplitude_1 = self.model.amplitude_1
model.fwhm_0 = self.model.fwhm_0
model.x_0_2.tied = double_f
model.fwhm_0.bounds = [0, 10]
model.amplitude_0.fixed = True
p = model(self.ps.freq)
noise = np.random.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = self.ps.m
ps.df = self.ps.df
ps.norm = "leahy"
pe = PSDParEst(ps, fitmethod="TNC")
llike = PSDLogLikelihood(ps.freq, ps.power, model)
true_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
res = pe.fit(llike, true_pars, neg=True)
compare_pars = [self.x_0_0, self.fwhm_0,
self.amplitude_1,
model.amplitude_2.value,
model.fwhm_2.value]
assert np.allclose(compare_pars, res.p_opt, rtol=0.5)
def test_par_est_initializes(self):
pe = PSDParEst(self.ps)
assert pe.max_post is True, "max_post should be set to True as a default."
def test_fit_fails_when_object_is_not_posterior_or_likelihood(self):
x = np.ones(10)
y = np.ones(10)
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(x, y)
def test_fit_fails_without_lpost_or_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit()
def test_fit_fails_without_t0(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
res = pe.fit(np.ones(10))
def test_fit_fails_with_incorrect_number_of_parameters(self):
pe = PSDParEst(self.ps)
t0 = [1,2]
with pytest.raises(ValueError):
res = pe.fit(self.lpost, t0)
@pytest.mark.skipif("not can_plot")
def test_fit_method_works_with_correct_parameter(self):
pe = PSDParEst(self.ps)
lpost = PSDPosterior(self.ps.freq, self.ps.power,
self.model, self.priors, m=self.ps.m)
t0 = [2.0, 1, 1, 1]
res = pe.fit(lpost, t0)
assert isinstance(res, OptimizationResults), "res must be of type " \
"OptimizationResults"
pe.plotfits(res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, save_plot=True, log=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
pe.plotfits(res, res2=res, save_plot=True)
assert os.path.exists("test_ps_fit.png")
os.unlink("test_ps_fit.png")
def test_compute_lrt_fails_when_garbage_goes_in(self):
pe = PSDParEst(self.ps)
t0 = [2.0, 1, 1, 1]
with pytest.raises(TypeError):
pe.compute_lrt(self.lpost, t0, None, t0)
with pytest.raises(ValueError):
pe.compute_lrt(self.lpost, t0[:-1], self.lpost, t0)
def test_compute_lrt_works(self):
t0 = [2.0, 1, 1, 1]
pe = PSDParEst(self.ps, max_post=True)
assert pe.max_post is True
delta_deviance, _, _ = pe.compute_lrt(self.lpost, t0, self.lpost, t0)
assert pe.max_post is False
assert np.absolute(delta_deviance) < 1.5e-4
def test_simulate_lrts_works(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(5) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
lrt_obs, res1, res2 = pe.compute_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], neg=True)
lrt_sim = pe.simulate_lrts(s_all, loglike, [2.0], loglike2,
[2.0, 1.0, 2.0],
seed=100)
assert (lrt_obs > 0.4) and (lrt_obs < 0.6)
assert np.all(lrt_sim < 10.0) and np.all(lrt_sim > 0.01)
def test_compute_lrt_fails_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
lrt_sim = pe.simulate_lrts(np.arange(5), self.lpost, [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_data(self):
pe = PSDParEst(self.ps)
m = self.model
_fitter_to_model_params(m, self.t0)
model = m(self.ps.freq)
pe_model = pe._generate_model(self.lpost, [self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1])
assert np.allclose(model, pe_model)
    def test_generate_data_rng_object_works(self):
pe = PSDParEst(self.ps)
sim_data1 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
sim_data2 = pe._generate_data(self.lpost,
[self.x_0_0, self.fwhm_0,
self.amplitude_0,
self.amplitude_1],
seed=1)
assert np.allclose(sim_data1.power, sim_data2.power)
def test_generate_data_produces_correct_distribution(self):
model = models.Const1D()
model.amplitude = 2.0
p = model(self.ps.freq)
seed = 100
rng = np.random.RandomState(seed)
noise = rng.exponential(size=len(p))
power = noise*p
ps = Powerspectrum()
ps.freq = self.ps.freq
ps.power = power
ps.m = 1
ps.df = self.ps.freq[1]-self.ps.freq[0]
ps.norm = "leahy"
lpost = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
pe = PSDParEst(ps)
rng2 = np.random.RandomState(seed)
sim_data = pe._generate_data(lpost, [2.0], rng2)
assert np.allclose(ps.power, sim_data.power)
def test_generate_model_breaks_with_wrong_input(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model([1, 2, 3, 4], [1, 2, 3, 4])
def test_generate_model_breaks_for_wrong_number_of_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(AssertionError):
pe_model = pe._generate_model(self.lpost, [1, 2, 3])
def test_pvalue_calculated_correctly(self):
a = [1, 1, 1, 2]
obs_val = 1.5
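        # Illustrative note: assuming _compute_pvalue returns the fraction of simulated
        # values at or above obs_val, exactly one of the four entries (the 2) qualifies,
        # so the expected p-value is 1/4 == 1./len(a), as asserted below.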
pe = PSDParEst(self.ps)
pval = pe._compute_pvalue(obs_val, a)
assert np.isclose(pval, 1./len(a))
def test_calibrate_lrt_fails_without_lpost_objects(self):
pe = PSDParEst(self.ps)
with pytest.raises(TypeError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
np.arange(10), np.arange(4))
def test_calibrate_lrt_fails_with_wrong_parameters(self):
pe = PSDParEst(self.ps)
with pytest.raises(ValueError):
pval = pe.calibrate_lrt(self.lpost, [1, 2, 3, 4],
self.lpost, [1, 2, 3])
def test_calibrate_lrt_works_as_expected(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(10) * 2.0).T
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
loglike2 = PSDLogLikelihood(ps.freq, ps.power, model2, 1)
pe = PSDParEst(ps)
pval = pe.calibrate_lrt(loglike, [2.0], loglike2,
[2.0, 1.0, 2.0], sample=s_all,
max_post=False, nsim=5,
seed=100)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_lrt_works_with_sampling(self):
m = 1
nfreq = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(100)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude_1 = lambda amplitude: \
scipy.stats.norm(loc=2.0, scale=1.0).pdf(amplitude)
p_alpha_0 = lambda alpha: \
scipy.stats.uniform(0.0, 5.0).pdf(alpha)
p_amplitude_0 = lambda amplitude: \
scipy.stats.norm(loc=self.a2_mean, scale=self.a2_var).pdf(
amplitude)
priors = {"amplitude": p_amplitude_1}
priors2 = {"amplitude_1": p_amplitude_1,
"amplitude_0": p_amplitude_0,
"alpha_0": p_alpha_0}
lpost.logprior = set_logprior(lpost, priors)
model2 = models.PowerLaw1D() + models.Const1D()
model2.x_0_0.fixed = True
lpost2 = PSDPosterior(ps.freq, ps.power, model2, 1)
lpost2.logprior = set_logprior(lpost2, priors2)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_lrt(lpost, [2.0], lpost2,
[2.0, 1.0, 2.0], sample=None,
max_post=True, nsim=10, nwalkers=10,
burnin=10, niter=10,
seed=100)
assert pval > 0.001
def test_find_highest_outlier_works_as_expected(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
pe = PSDParEst(ps)
max_x, max_ind = pe._find_outlier(ps.freq, ps.power, max_power)
assert np.isclose(max_x, ps.freq[mp_ind])
assert max_ind == mp_ind
def test_compute_highest_outlier_works(self):
mp_ind = 5
max_power = 1000.0
ps = Powerspectrum()
ps.freq = np.arange(10)
ps.power = np.ones_like(ps.freq)
ps.power[mp_ind] = max_power
ps.m = 1
ps.df = ps.freq[1]-ps.freq[0]
ps.norm = "leahy"
model = models.Const1D()
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost = PSDPosterior(ps.freq, ps.power, model, 1)
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
res = pe.fit(lpost, [1.0])
res.mfit = np.ones_like(ps.freq)
max_y, max_x, max_ind = pe._compute_highest_outlier(lpost, res)
assert np.isclose(max_y[0], 2*max_power)
assert np.isclose(max_x[0], ps.freq[mp_ind])
assert max_ind == mp_ind
def test_simulate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
maxpow_sim = pe.simulate_highest_outlier(s_all, loglike, [2.0],
max_post=False, seed=seed)
assert maxpow_sim.shape[0] == nsim
assert np.all(maxpow_sim > 9.00) and np.all(maxpow_sim < 31.0)
def test_calibrate_highest_outlier_works(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
loglike = PSDLogLikelihood(ps.freq, ps.power, model, m=1)
s_all = np.atleast_2d(np.ones(nsim) * 2.0).T
pe = PSDParEst(ps)
pval = pe.calibrate_highest_outlier(loglike, [2.0], sample=s_all,
max_post=False, seed=seed)
assert pval > 0.001
@pytest.mark.skipif("not can_sample")
def test_calibrate_highest_outlier_works_with_sampling(self):
m = 1
nfreq = 100
seed = 100
freq = np.linspace(1, 10, nfreq)
rng = np.random.RandomState(seed)
noise = rng.exponential(size=nfreq)
model = models.Const1D()
model.amplitude = 2.0
p = model(freq)
power = noise * p
ps = Powerspectrum()
ps.freq = freq
ps.power = power
ps.m = m
ps.df = freq[1] - freq[0]
ps.norm = "leahy"
nsim = 5
lpost = PSDPosterior(ps.freq, ps.power, model, m=1)
p_amplitude = lambda amplitude: \
scipy.stats.norm(loc=1.0, scale=1.0).pdf(
amplitude)
priors = {"amplitude": p_amplitude}
lpost.logprior = set_logprior(lpost, priors)
pe = PSDParEst(ps)
with catch_warnings(RuntimeWarning):
pval = pe.calibrate_highest_outlier(lpost, [2.0], sample=None,
max_post=True, seed=seed,
nsim=nsim, niter=10,
nwalkers=20, burnin=10)
assert pval > 0.001
| mit |
antepsis/anteplahmacun | sympy/holonomic/holonomic.py | 7 | 93736 | """
This module implements Holonomic Functions and
various operations on them.
"""
from __future__ import print_function, division
from sympy import (Symbol, diff, S, Dummy, Order, rf, meijerint, I,
solve, limit, Float, nsimplify, gamma)
from sympy.printing import sstr
from sympy.core.compatibility import range, ordered
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.core.sympify import sympify
from sympy.simplify.hyperexpand import hyperexpand
from sympy.functions.special.hyper import hyper, meijerg
from sympy.core.numbers import NaN, Infinity, NegativeInfinity
from sympy.matrices import Matrix
from sympy.functions.elementary.exponential import exp_polar, exp
from .linearsolver import NewMatrix
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from .holonomicerrors import (NotPowerSeriesError, NotHyperSeriesError,
SingularityError, NotHolonomicError)
from sympy.polys.rings import PolyElement
from sympy.polys.fields import FracElement
from sympy.polys.domains import QQ, ZZ, RR
from sympy.polys.domains.pythonrational import PythonRational
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
def DifferentialOperators(base, generator):
r"""
This function is used to create annihilators using ``Dx``.
    Returns an Algebra of Differential Operators, also called a Weyl Algebra,
    and the operator for differentiation, i.e. the ``Dx`` operator.
Parameters
==========
base:
Base polynomial ring for the algebra.
The base polynomial ring is the ring of polynomials in :math:`x` that
will appear as coefficients in the operators.
generator:
Generator of the algebra which can
be either a noncommutative ``Symbol`` or a string. e.g. "Dx" or "D".
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.abc import x
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring ZZ[x]
>>> Dx*x
(1) + (x)*Dx
"""
ring = DifferentialOperatorAlgebra(base, generator)
return (ring, ring.derivative_operator)
class DifferentialOperatorAlgebra(object):
r"""
An Ore Algebra is a set of noncommutative polynomials in the
intermediate ``Dx`` and coefficients in a base polynomial ring :math:`A`.
It follows the commutation rule:
.. math ::
        Dx a = \sigma(a) Dx + \delta(a)
    for :math:`a \in A`.
    Where :math:`\sigma: A --> A` is an endomorphism and :math:`\delta: A --> A`
    is a skew-derivation i.e. :math:`\delta(ab) = \delta(a) * b + \sigma(a) * \delta(b)`.
    If one takes sigma as the identity map and delta as the standard derivation
then it becomes the algebra of Differential Operators also called
a Weyl Algebra i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring
ZZ[x]
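
    A quick, illustrative check of the commutation rule, using ``x`` and ``Dx``
    from the setup above:

    >>> Dx*x == x*Dx + 1
    True
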
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = Symbol('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = Symbol(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Differential Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
class DifferentialOperator(object):
"""
Differential Operators are elements of Weyl Algebra. The Operators
are defined by a list of polynomials in the base ring and the
parent ring of the Operator i.e. the algebra it belongs to.
Takes a list of polynomials for each power of ``Dx`` and the
parent ring which must be an instance of DifferentialOperatorAlgebra.
A Differential Operator can be created easily using
the operator ``Dx``. See examples below.
Examples
========
>>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> DifferentialOperator([0, 1, x**2], R)
(1)*Dx + (x**2)*Dx**2
>>> (x*Dx*x + 1 - Dx**2)**2
(2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)*Dx + (x**4 - 6*x - 2)*Dx**2 + (-2*x**2)*Dx**3 + (1)*Dx**4
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
"""
Parameters
==========
list_of_poly:
List of polynomials belonging to the base ring of the algebra.
parent:
Parent algebra of the operator.
"""
# the parent ring for this operator
# must be an DifferentialOperatorAlgebra object
self.parent = parent
base = self.parent.base
self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
# sequence of polynomials in x for each power of Dx
# the list should not have trailing zeroes
# represents the operator
# convert the expressions into ring elements using from_sympy
for i, j in enumerate(list_of_poly):
if not isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(sympify(j))
else:
list_of_poly[i] = base.from_sympy(base.to_sympy(j))
self.listofpoly = list_of_poly
# highest power of `Dx`
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two DifferentialOperator and returns another
DifferentialOperator instance using the commutation rule
Dx*a = a*Dx + a'
"""
listofself = self.listofpoly
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiplies a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Dx^i * b
def _mul_Dxi_b(b):
sol1 = [self.parent.base.zero]
sol2 = []
if isinstance(b, list):
for i in b:
sol1.append(i)
sol2.append(i.diff())
else:
sol1.append(self.parent.base.from_sympy(b))
sol2.append(self.parent.base.from_sympy(b).diff())
return _add_lists(sol1, sol2)
for i in range(1, len(listofself)):
# find Dx^i * b in ith iteration
listofother = _mul_Dxi_b(listofother)
# solution = solution + listofself[i] * (Dx^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return DifferentialOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(sympify(other))
sol = []
for j in self.listofpoly:
sol.append(other * j)
return DifferentialOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, DifferentialOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return DifferentialOperator(sol, self.parent)
else:
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(sympify(other))]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return DifferentialOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return DifferentialOperator([self.parent.base.one], self.parent)
# if self is `Dx`
if self.listofpoly == self.parent.derivative_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return DifferentialOperator(sol, self.parent)
# the general case
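        # Illustrative note: the branch below is binary (square-and-multiply)
        # exponentiation, so computing an operator power needs only about log2(n)
        # operator multiplications instead of n - 1.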
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')*%s' %(self.parent.gen_symbol)
continue
print_str += '(' + sstr(j) + ')' + '*%s**' %(self.parent.gen_symbol) + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, DifferentialOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
                for i in self.listofpoly[1:]:
                    if not i == self.parent.base.zero:
return False
return True
else:
return False
def is_singular(self, x0):
"""
Checks if the differential equation is singular at x0.
"""
base = self.parent.base
return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
class HolonomicFunction(object):
r"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator ``L`` such that :math:`L.f = 0`. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
    Holonomic functions have closure properties and thus form a ring.
Given two Holonomic Functions f and g, their sum, product,
integral and derivative is also a Holonomic Function.
    For ordinary points, the initial condition should be a vector of values of
    the derivatives i.e. :math:`[y(x_0), y'(x_0), y''(x_0) ... ]`.
    For regular singular points, initial conditions can also be provided in this
format:
:math:`{s0: [C_0, C_1, ...], s1: [C^1_0, C^1_1, ...], ...}`
    where s0, s1, ... are the roots of the indicial equation and the vectors
    :math:`[C_0, C_1, ...], [C^1_0, C^1_1, ...], ...` are the corresponding initial
    terms of the associated power series. See Examples below.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)*Dx + (-1)*Dx**2 + (1)*Dx**3, x, 0, [1, 2, 1])
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x, 0, [0, 1])
    An example of initial conditions for regular singular points;
    here the indicial equation has only one root, `1/2`.
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)*Dx, x, 0, {1/2: [1]})
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
>>> import sympy.holonomic # doctest: +SKIP
>>> from sympy import var, sin # doctest: +SKIP
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> import numpy as np # doctest: +SKIP
>>> var("x") # doctest: +SKIP
>>> r = np.linspace(1, 5, 100) # doctest: +SKIP
>>> y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r) # doctest: +SKIP
>>> plt.plot(r, y, label="holonomic function") # doctest: +SKIP
>>> plt.show() # doctest: +SKIP
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Parameters
==========
annihilator:
Annihilator of the Holonomic Function, represented by a
`DifferentialOperator` object.
x:
Variable of the function.
x0:
The point at which initial conditions are stored.
Generally an integer.
y0:
The initial condition. The proper format for the initial condition
is described in class docstring. To make the function unique,
length of the vector `y0` should be equal to or greater than the
order of differential equation.
"""
# initial condition
self.y0 = y0
        # the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __str__(self):
if self._have_init_cond():
str_sol = 'HolonomicFunction(%s, %s, %s, %s)' % (str(self.annihilator),\
sstr(self.x), sstr(self.x0), sstr(self.y0))
else:
str_sol = 'HolonomicFunction(%s, %s)' % (str(self.annihilator),\
sstr(self.x))
return str_sol
__repr__ = __str__
def unify(self, other):
"""
Unifies the base polynomial ring of a given two Holonomic
Functions.
"""
R1 = self.annihilator.parent.base
R2 = other.annihilator.parent.base
dom1 = R1.dom
dom2 = R2.dom
if R1 == R2:
return (self, other)
R = (dom1.unify(dom2)).old_poly_ring(self.x)
newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))
sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]
sol1 = DifferentialOperator(sol1, newparent)
sol2 = DifferentialOperator(sol2, newparent)
sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)
return (sol1, sol2)
def is_singularics(self):
"""
Returns True if the function have singular initial condition
in the dictionary format.
Returns False if the function have ordinary initial condition
in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
        Checks whether the function has an initial condition.
"""
return bool(self.y0)
def _singularics_to_ord(self):
"""
Converts a singular initial condition to ordinary if possible.
"""
a = list(self.y0)[0]
b = self.y0[a]
if len(self.y0) == 1 and a == int(a) and a > 0:
y0 = []
a = int(a)
for i in range(a):
y0.append(S(0))
y0 += [j * factorial(a + i) for i, j in enumerate(b)]
return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
def __add__(self, other):
# if the ground domains are different
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a + b
deg1 = self.annihilator.order
deg2 = other.annihilator.order
dim = max(deg1, deg2)
R = self.annihilator.parent.base
K = R.get_field()
rowsself = [self.annihilator]
rowsother = [other.annihilator]
gen = self.annihilator.parent.derivative_operator
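        # Sketch of the idea (informal): an annihilator of f + g is a common left
        # multiple of the two annihilators. The rows built below are Dx^j * L_f and
        # Dx^j * L_g; stacking their coefficient vectors and solving the homogeneous
        # system yields c_i, d_j with sum(c_i*Dx^i*L_f) + sum(d_j*Dx^j*L_g) = 0, so
        # sum(c_i*Dx^i)*L_f annihilates both f and g and hence their sum.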
# constructing annihilators up to order dim
for i in range(dim - deg1):
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
for i in range(dim - deg2):
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
# constructing the matrix of the ansatz
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(0)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
# solving the linear system using gauss jordan solver
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
# if a solution is not obtained then increasing the order by 1 in each
# iteration
while sol.is_zero:
dim += 1
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(S(0))
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
# taking only the coefficients needed to multiply with `self`
        # can also be done the other way by taking the R.H.S. and multiplying with
# `other`
sol = sol[:dim + 1 - deg1]
sol1 = _normalize(sol, self.annihilator.parent)
# annihilator of the solution
sol = sol1 * (self.annihilator)
sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol, self.x)
# both the functions have ordinary initial conditions
if self.is_singularics() == False and other.is_singularics() == False:
# directly add the corresponding value
if self.x0 == other.x0:
                # try to extend the initial conditions
# using the annihilator
y1 = _extend_y0(self, sol.order)
y2 = _extend_y0(other, sol.order)
y0 = [a + b for a, b in zip(y1, y2)]
return HolonomicFunction(sol, self.x, self.x0, y0)
else:
                # change the initial conditions to the same point
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self + other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) + other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self + other.change_ics(self.x0)
else:
return self.change_ics(other.x0) + other
if self.x0 != other.x0:
return HolonomicFunction(sol, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
# convert the ordinary initial condition to singular.
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0): _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0): _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
# computing singular initial condition for the result
# taking union of the series terms of both functions
y0 = {}
for i in y1:
# add corresponding initial terms if the power
# on `x` is same
if i in y2:
y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
else:
y0[i] = y1[i]
for i in y2:
if not i in y1:
y0[i] = y2[i]
return HolonomicFunction(sol, self.x, self.x0, y0)
def integrate(self, limits, initcond=False):
"""
Integrates the given holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
HolonomicFunction((-1)*Dx + (1)*Dx**2, x, 0, [0, 1])
>>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
HolonomicFunction((1)*Dx + (1)*Dx**3, x, 0, [0, 1, 0])
"""
# to get the annihilator, just multiply by Dx from right
D = self.annihilator.parent.derivative_operator
        # if the function has initial conditions in the series format
if self.is_singularics() == True:
r = self._singularics_to_ord()
if r:
return r.integrate(limits, initcond=initcond)
# computing singular initial condition for the function
# produced after integration.
y0 = {}
for i in self.y0:
c = self.y0[i]
c2 = []
for j in range(len(c)):
if c[j] == 0:
c2.append(S(0))
# if power on `x` is -1, the integration becomes log(x)
# TODO: Implement this case
elif i + j + 1 == 0:
raise NotImplementedError("logarithmic terms in the series are not supported")
else:
c2.append(c[j] / S(i + j + 1))
y0[i + 1] = c2
if hasattr(limits, "__iter__"):
raise NotImplementedError("Definite integration for singular initial conditions")
return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
# if no initial conditions are available for the function
if not self._have_init_cond():
if initcond:
return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S(0)])
return HolonomicFunction(self.annihilator * D, self.x)
# definite integral
# initial conditions for the answer will be stored at point `a`,
# where `a` is the lower limit of the integrand
if hasattr(limits, "__iter__"):
if len(limits) == 3 and limits[0] == self.x:
x0 = self.x0
a = limits[1]
b = limits[2]
definite = True
else:
definite = False
y0 = [S(0)]
y0 += self.y0
indefinite_integral = HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
if not definite:
return indefinite_integral
# use evalf to get the values at `a`
if x0 != a:
try:
indefinite_expr = indefinite_integral.to_expr()
except (NotHyperSeriesError, NotPowerSeriesError):
indefinite_expr = None
if indefinite_expr:
lower = indefinite_expr.subs(self.x, a)
if isinstance(lower, NaN):
lower = indefinite_expr.limit(self.x, a)
else:
lower = indefinite_integral.evalf(a)
if b == self.x:
y0[0] = y0[0] - lower
return HolonomicFunction(self.annihilator * D, self.x, x0, y0)
elif S(b).is_Number:
if indefinite_expr:
upper = indefinite_expr.subs(self.x, b)
if isinstance(upper, NaN):
upper = indefinite_expr.limit(self.x, b)
else:
upper = indefinite_integral.evalf(b)
return upper - lower
# if the upper limit is `x`, the answer will be a function
if b == self.x:
return HolonomicFunction(self.annihilator * D, self.x, a, y0)
        # if the upper limit is a Number, a numerical value will be returned
elif S(b).is_Number:
try:
s = HolonomicFunction(self.annihilator * D, self.x, a,\
y0).to_expr()
indefinite = s.subs(self.x, b)
if not isinstance(indefinite, NaN):
return indefinite
else:
return s.limit(self.x, b)
except (NotHyperSeriesError, NotPowerSeriesError):
return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
return HolonomicFunction(self.annihilator * D, self.x)
def diff(self, *args):
r"""
Differentiation of the given Holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
cos(x)
>>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
2*exp(2*x)
See Also
========
.integrate()
"""
if args:
if args[0] != self.x:
return S(0)
elif len(args) == 2:
sol = self
for i in range(args[1]):
sol = sol.diff(args[0])
return sol
ann = self.annihilator
dx = ann.parent.derivative_operator
# if the function is constant.
if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
return S(0)
# if the coefficient of y in the differential equation is zero.
# a shifting is done to compute the answer in this case.
elif ann.listofpoly[0] == ann.parent.base.zero:
sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
if self._have_init_cond():
# if ordinary initial condition
if self.is_singularics() == False:
return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
# TODO: support for singular initial condition
return HolonomicFunction(sol, self.x)
else:
return HolonomicFunction(sol, self.x)
# the general algorithm
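        # Illustrative note: e.g. for the function annihilated by Dx**2 + 1 with
        # y0 = [0, 1] (sine), this branch returns a function with the same
        # annihilator (up to normalization) and y0 = [1, 0], i.e. cosine, matching
        # the doctest above.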
R = ann.parent.base
K = R.get_field()
seq_dmf = [K.new(i.rep) for i in ann.listofpoly]
        # -y = (a1/a0)*y' + (a2/a0)*y'' + ... + (an/a0)*y^(n)
rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
rhs.insert(0, K.zero)
# differentiate both lhs and rhs
sol = _derivate_diff_eq(rhs)
# add the term y' in lhs to rhs
sol = _add_lists(sol, [K.zero, K.one])
sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
if not self._have_init_cond() or self.is_singularics() == True:
return HolonomicFunction(sol, self.x)
y0 = _extend_y0(self, sol.order + 1)[1:]
return HolonomicFunction(sol, self.x, self.x0, y0)
def __eq__(self, other):
if self.annihilator == other.annihilator:
if self.x == other.x:
if self._have_init_cond() and other._have_init_cond():
if self.x0 == other.x0 and self.y0 == other.y0:
return True
else:
return False
else:
return True
else:
return False
else:
return False
def __mul__(self, other):
ann_self = self.annihilator
if not isinstance(other, HolonomicFunction):
other = sympify(other)
if not other.is_constant():
raise NotImplementedError(" Can't multiply a HolonomicFunction and expressions/functions.")
if not self._have_init_cond():
return self
else:
y0 = _extend_y0(self, ann_self.order)
y1 = []
for j in y0:
y1.append(j * other)
return HolonomicFunction(ann_self, self.x, self.x0, y1)
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a * b
ann_other = other.annihilator
list_self = []
list_other = []
a = ann_self.order
b = ann_other.order
R = ann_self.parent.base
K = R.get_field()
for j in ann_self.listofpoly:
list_self.append(K.new(j.rep))
for j in ann_other.listofpoly:
list_other.append(K.new(j.rep))
# will be used to reduce the degree
self_red = [-list_self[i] / list_self[a] for i in range(a)]
other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mul[i][j] is the coefficient of Dx^i(f).Dx^j(g)
coeff_mul = [[S(0) for i in range(b + 1)] for j in range(a + 1)]
coeff_mul[0][0] = S(1)
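        # Sketch of the idea (informal): every derivative Dx^k(f*g) can be rewritten,
        # using the two annihilators, as a combination of the a*b basis terms
        # Dx^i(f)*Dx^j(g) with i < a, j < b. coeff_mul holds these coordinates; we keep
        # differentiating until the coordinate vectors become linearly dependent, and
        # that dependence gives the annihilator of the product.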
# making the ansatz
lin_sys = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
homo_sys = [[S(0) for q in range(a * b)]]
homo_sys = NewMatrix(homo_sys).transpose()
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
# until a non trivial solution is found
while sol[0].is_zero:
            # updating the coefficients of Dx^i(f).Dx^j(g) for the next degree
for i in range(a - 1, -1, -1):
for j in range(b - 1, -1, -1):
coeff_mul[i][j + 1] += coeff_mul[i][j]
coeff_mul[i + 1][j] += coeff_mul[i][j]
if isinstance(coeff_mul[i][j], K.dtype):
coeff_mul[i][j] = DMFdiff(coeff_mul[i][j])
else:
coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
# reduce the terms to lower power using annihilators of f, g
for i in range(a + 1):
if not coeff_mul[i][b] == S(0):
for j in range(b):
coeff_mul[i][j] += other_red[j] * \
coeff_mul[i][b]
coeff_mul[i][b] = S(0)
# not d2 + 1, as that is already covered in previous loop
for j in range(b):
if not coeff_mul[a][j] == 0:
for i in range(a):
coeff_mul[i][j] += self_red[i] * \
coeff_mul[a][j]
coeff_mul[a][j] = S(0)
lin_sys.append([coeff_mul[i][j] for i in range(a)
for j in range(b)])
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
sol_ann = _normalize(sol[0][0:], self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol_ann, self.x)
if self.is_singularics() == False and other.is_singularics() == False:
# if both the conditions are at same point
if self.x0 == other.x0:
                # try to find more initial conditions
y0_self = _extend_y0(self, sol_ann.order)
y0_other = _extend_y0(other, sol_ann.order)
# h(x0) = f(x0) * g(x0)
y0 = [y0_self[0] * y0_other[0]]
# coefficient of Dx^j(f)*Dx^i(g) in Dx^i(fg)
for i in range(1, min(len(y0_self), len(y0_other))):
coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
for j in range(i + 1):
for k in range(i + 1):
if j + k == i:
coeff[j][k] = binomial(i, j)
sol = 0
for j in range(i + 1):
for k in range(i + 1):
sol += coeff[j][k]* y0_self[j] * y0_other[k]
y0.append(sol)
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the initial conditions are stored at different points, move them to a common point
else:
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self * other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) * other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self * other.change_ics(self.x0)
else:
return self.change_ics(other.x0) * other
if self.x0 != other.x0:
return HolonomicFunction(sol_ann, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0): _y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0): _y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
y0 = {}
# multiply every possible pair of the series terms
for i in y1:
for j in y2:
k = min(len(y1[i]), len(y2[j]))
c = []
for a in range(k):
s = S(0)
for b in range(a + 1):
s += y1[i][b] * y2[j][a - b]
c.append(s)
if not i + j in y0:
y0[i + j] = c
else:
y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
__rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n < 0:
raise NotHolonomicError("Negative Power on a Holonomic Function")
if n == 0:
Dx = self.annihilator.parent.derivative_operator
return HolonomicFunction(Dx, self.x, S(0), [S(1)])
if n == 1:
return self
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
"""
sol = [i.degree() for i in self.annihilator.listofpoly]
return max(sol)
def composition(self, expr, *args, **kwargs):
"""
Returns function after composition of a holonomic
function with an algebraic function. The method can't compute
        initial conditions for the result by itself, so they can also be
provided.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)*Dx, x, 0, [1])
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)*Dx + (x)*Dx**2, x, 1, [1, 0])
See Also
========
from_hyper()
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
coeffs = [S(0) for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S(1)
system = [coeffs]
homogeneous = Matrix([[S(0) for i in range(a)]]).transpose()
sol = S(0)
while sol.is_zero:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol_tuple = (Matrix(system).transpose()).gauss_jordan_solve(homogeneous)
sol = sol_tuple[0]
tau = sol.atoms(Dummy).pop()
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
def to_sequence(self, lb=True):
r"""
Finds recurrence relation for the coefficients in the series expansion
of the function about :math:`x_0`, where :math:`x_0` is the point at
which the initial condition is stored.
If the point :math:`x_0` is ordinary, solution of the form :math:`[(R, n_0)]`
is returned. Where :math:`R` is the recurrence relation and :math:`n_0` is the
smallest ``n`` for which the recurrence holds true.
If the point :math:`x_0` is regular singular, a list of solutions in
the format :math:`(R, p, n_0)` is returned, i.e. `[(R, p, n_0), ... ]`.
Each tuple in this vector represents a recurrence relation :math:`R`
associated with a root of the indicial equation ``p``. Conditions of
a different format can also be provided in this case, see the
docstring of HolonomicFunction class.
        If it is not possible to numerically compute an initial condition,
it is returned as a symbol :math:`C_j`, denoting the coefficient of
:math:`(x - x_0)^j` in the power series about :math:`x_0`.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
[(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
>>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
[(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
[(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
See Also
========
HolonomicFunction.series()
References
==========
.. [1] https://hal.inria.fr/inria-00070025/document
.. [2] http://www.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
"""
if self.x0 != 0:
return self.shift_x(self.x0).to_sequence()
# check whether a power series exists if the point is singular
if self.annihilator.is_singular(self.x0):
return self._frobenius(lb=lb)
dict1 = {}
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
# substituting each term of the form `x^k Dx^j` in the
# annihilator, according to the formula below:
# x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
# for explanation see [2].
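        # Worked instance (illustrative): for the annihilator Dx - 1 of exp(x) the
        # substitution gives (n + 1)*a(n + 1) - a(n) = 0, i.e. a(n + 1) = a(n)/(n + 1),
        # which with a(0) = 1 reproduces the series coefficients 1/n!.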
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k) in dict1:
dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
else:
dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = self.degree()
# the recurrence relation holds for all values of
        # n greater than or equal to smallest_n, i.e. n >= smallest_n
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
# an appropriate shift of the recurrence
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
y0 = _extend_y0(self, order)
u0 = []
# u(n) = y^n(0)/factorial(n)
for i, j in enumerate(y0):
u0.append(j / factorial(i))
# if sufficient conditions can't be computed then
# try to use the series method i.e.
# equate the coefficients of x^k in the equation formed by
# substituting the series in differential equation, to zero.
if len(u0) < order:
for i in range(degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
def _frobenius(self, lb=True):
# compute the roots of indicial equation
indicialroots = self._indicial()
reals = []
compl = []
for i in ordered(indicialroots.keys()):
if i.is_real:
reals.extend([i] * indicialroots[i])
else:
a, b = i.as_real_imag()
compl.extend([(i, a, b)] * indicialroots[i])
# sort the roots for a fixed ordering of solution
compl.sort(key=lambda x : x[1])
compl.sort(key=lambda x : x[2])
reals.sort()
x = self.x
        # grouping the roots: roots that differ by an integer are put in the same group.
grp = []
for i in reals:
intdiff = False
if len(grp) == 0:
grp.append([i])
continue
for j in grp:
if int(j[0] - i) == j[0] - i:
j.append(i)
intdiff = True
break
if not intdiff:
grp.append([i])
# True if none of the roots differ by an integer i.e.
        # each group has only one member
independent = True if all(len(i) == 1 for i in grp) else False
allpos = all(i >= 0 for i in reals)
allint = all(int(i) == i for i in reals)
# if initial conditions are provided
# then use them.
if self.is_singularics() == True:
rootstoconsider = []
for i in ordered(self.y0.keys()):
for j in ordered(indicialroots.keys()):
if j == i:
rootstoconsider.append(i)
elif allpos and allint:
rootstoconsider = [min(reals)]
elif independent:
rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
elif not allint:
rootstoconsider = []
for i in reals:
if not int(i) == i:
rootstoconsider.append(i)
elif not allpos:
if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
rootstoconsider = [min(reals)]
else:
posroots = []
for i in reals:
if i >= 0:
posroots.append(i)
rootstoconsider = [min(posroots)]
n = Symbol('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
finalsol = []
char = ord('C')
for p in rootstoconsider:
dict1 = {}
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k - i) in dict1:
dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
else:
dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = max([i[1] for i in dict1])
degree2 = min([i[1] for i in dict1])
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
u0 = []
if self.is_singularics() == True:
u0 = self.y0[p]
elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
y0 = _extend_y0(self, order + int(p))
# u(n) = y^n(0)/factorial(n)
if len(y0) > int(p):
for i in range(int(p), len(y0)):
u0.append(y0[i] / factorial(i))
if len(u0) < order:
for i in range(degree2, degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
letter = chr(char) + '_%s' %(i + j[0])
dummys[i + j[0]] = Symbol(letter)
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
continue
else:
finalsol.append((HolonomicSequence(sol, u0), p))
continue
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
else:
finalsol.append((HolonomicSequence(sol, u0), p))
char += 1
return finalsol
def series(self, n=6, coefficient=False, order=True, _recur=None):
r"""
Finds the power series expansion of given holonomic function about :math:`x_0`.
        A list of series might be returned if :math:`x_0` is a regular singular point
        with multiple roots of the indicial equation.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
See Also
========
HolonomicFunction.to_sequence()
"""
if _recur == None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = []
for i in recurrence:
sol.append(self.series(_recur=i))
return sol
n = n - int(constantpower)
l = len(recurrence.u0) - 1
k = recurrence.recurrence.order
x = self.x
x0 = self.x0
seq_dmp = recurrence.recurrence.listofpoly
R = recurrence.recurrence.parent.base
K = R.get_field()
seq = []
for i, j in enumerate(seq_dmp):
seq.append(K.new(j.rep))
sub = [-seq[i] / seq[k] for i in range(k)]
sol = [i for i in recurrence.u0]
if l + 1 >= n:
pass
else:
# use the initial conditions to find the next term
for i in range(l + 1 - k, n - k):
coeff = S(0)
for j in range(k):
if i + j >= 0:
coeff += DMFsubs(sub[j], i) * sol[i + j]
sol.append(coeff)
if coefficient:
return sol
ser = S(0)
for i, j in enumerate(sol):
ser += x**(i + constantpower) * j
if order:
ser += Order(x**(n + int(constantpower)), x)
if x0 != 0:
return ser.subs(x, x - x0)
return ser
def _indicial(self):
"""
Computes roots of the Indicial equation.
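        For example (illustrative), for the annihilator ``x*Dx - 1/2`` of ``sqrt(x)``
        the operator acts on ``x**s`` as ``(s - 1/2)*x**s``, so the indicial equation
        is ``s - 1/2 = 0`` with the single root ``1/2``.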
"""
if self.x0 != 0:
return self.shift_x(self.x0)._indicial()
list_coeff = self.annihilator.listofpoly
R = self.annihilator.parent.base
x = self.x
s = R.zero
y = R.one
def _pole_degree(poly):
root_all = roots(R.to_sympy(poly), x, filter='Z')
if 0 in root_all.keys():
return root_all[0]
else:
return 0
degree = [j.degree() for j in list_coeff]
degree = max(degree)
inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
deg = lambda q: inf if q.is_zero else _pole_degree(q)
b = deg(list_coeff[0])
for j in range(1, len(list_coeff)):
b = min(b, deg(list_coeff[j]) - j)
for i, j in enumerate(list_coeff):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
if - i - b <= 0 and degree - i - b >= 0:
s = s + listofdmp[degree - i - b] * y
y *= x - i
return roots(R.to_sympy(s), x)
def evalf(self, points, method='RK4', h=0.05, derivatives=False):
r"""
        Finds the numerical value of a holonomic function using numerical methods
        (RK4 by default). A set of points (real or complex) must be provided
which will be the path for the numerical integration.
The path should be given as a list :math:`[x_1, x_2, ... x_n]`. The numerical
values will be computed at each point in this order
:math:`x_1 --> x_2 --> x_3 ... --> x_n`.
Returns values of the function at :math:`x_1, x_2, ... x_n` in a list.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        A straight line on the real axis from 0 to 1:
>>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
Runge-Kutta 4th order on e^x from 0.1 to 1.
Exact solution at 1 is 2.71828182845905
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
[1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
2.45960141378007, 2.71827974413517]
Euler's method for the same
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
[1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
2.357947691, 2.5937424601]
One can also observe that the value obtained using Runge-Kutta 4th order
is much more accurate than Euler's method.
"""
from sympy.holonomic.numerical import _evalf
lp = False
# if a point `b` is given instead of a mesh
if not hasattr(points, "__iter__"):
lp = True
b = S(points)
if self.x0 == b:
return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
if not b.is_Number:
raise NotImplementedError
a = self.x0
if a > b:
h = -h
n = int((b - a) / h)
points = [a + h]
for i in range(n - 1):
points.append(points[-1] + h)
for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
if i == self.x0 or i in points:
raise SingularityError(self, i)
if lp:
return _evalf(self, points, method=method, derivatives=derivatives)[-1]
return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
        Changes only the variable of the holonomic function, for internal
        purposes. For composition use HolonomicFunction.composition().
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = []
for j in self.annihilator.listofpoly:
sol.append(R(j.rep))
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
def to_hyper(self, as_list=False, _recur=None):
"""
Returns a hypergeometric function (or linear combination of them)
representing the given holonomic function.
Returns an answer of the form:
`a_1 \cdot x^{b_1} \cdot{hyper()} + a_2 \cdot x^{b_2} \cdot{hyper()} ...`
This is very useful as one can now use ``hyperexpand`` to find the
symbolic expressions/functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> # sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
x*hyper((), (3/2,), -x**2/4)
>>> # exp(x)
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
hyper((), (), x)
See Also
========
from_hyper, from_meijerg
"""
        if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
smallest_n = recurrence[1]
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
smallest_n = recurrence[2]
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
smallest_n = recurrence[0][1]
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
smallest_n = recurrence[0][2]
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
for i in recurrence[1:]:
sol += self.to_hyper(as_list=as_list, _recur=i)
return sol
u0 = recurrence.u0
r = recurrence.recurrence
x = self.x
x0 = self.x0
# order of the recurrence relation
m = r.order
        # when no recurrence exists and the power series has finitely many terms
if m == 0:
nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
sol = S(0)
for j, i in enumerate(nonzeroterms):
if i < 0 or int(i) != i:
continue
i = int(i)
if i < len(u0):
if isinstance(u0[i], (PolyElement, FracElement)):
u0[i] = u0[i].as_expr()
sol += u0[i] * x**i
else:
sol += Symbol('C_%s' %j) * x**i
if isinstance(sol, (PolyElement, FracElement)):
sol = sol.as_expr() * x**constantpower
else:
sol = sol * x**constantpower
if as_list:
if x0 != 0:
return [(sol.subs(x, x - x0), )]
return [(sol, )]
if x0 != 0:
return sol.subs(x, x - x0)
return sol
if smallest_n + m > len(u0):
raise NotImplementedError("Can't compute sufficient Initial Conditions")
# check if the recurrence represents a hypergeometric series
is_hyper = True
for i in range(1, len(r.listofpoly)-1):
if r.listofpoly[i] != r.parent.base.zero:
is_hyper = False
break
if not is_hyper:
raise NotHyperSeriesError(self, self.x0)
a = r.listofpoly[0]
b = r.listofpoly[-1]
        # the constant multiple of the argument of the hypergeometric function
if isinstance(a.rep[0], (PolyElement, FracElement)):
c = - (S(a.rep[0].as_expr()) * m**(a.degree())) / (S(b.rep[0].as_expr()) * m**(b.degree()))
else:
c = - (S(a.rep[0]) * m**(a.degree())) / (S(b.rep[0]) * m**(b.degree()))
sol = 0
arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
        # iterate through the initial conditions to find
# the hypergeometric representation of the given
# function.
# The answer will be a linear combination
# of different hypergeometric series which satisfies
# the recurrence.
if as_list:
listofsol = []
for i in range(smallest_n + m):
# if the recurrence relation doesn't hold for `n = i`,
# then a Hypergeometric representation doesn't exist.
# add the algebraic term a * x**i to the solution,
# where a is u0[i]
if i < smallest_n:
if as_list:
listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
else:
sol += S(u0[i]) * x**i
continue
# if the coefficient u0[i] is zero, then the
            # independent hypergeometric series starting with
# x**i is not a part of the answer.
if S(u0[i]) == 0:
continue
ap = []
bq = []
# substitute m * n + i for n
for k in ordered(arg1.keys()):
ap.extend([nsimplify((i - k) / m)] * arg1[k])
for k in ordered(arg2.keys()):
bq.extend([nsimplify((i - k) / m)] * arg2[k])
# convention of (k + 1) in the denominator
if 1 in bq:
bq.remove(1)
else:
ap.append(1)
if as_list:
listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
else:
sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
if as_list:
return listofsol
sol = sol * x**constantpower
if x0 != 0:
return sol.subs(x, x - x0)
return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to `b` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, cos, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)*Dx**2, x, 1, [sin(1), cos(1)])
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)*Dx, x, 2, [exp(2)])
"""
symbolic = True
        if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper()
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S(0)
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
r"""
Converts a hypergeometric function to holonomic.
``func`` is the Hypergeometric Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_hyper, DifferentialOperators
>>> from sympy import symbols, hyper, S
>>> x = symbols('x')
>>> from_hyper(hyper([], [S(3)/2], x**2/4))
HolonomicFunction((-x) + (2)*Dx + (x)*Dx**2, x, 1, [sinh(1), -sinh(1) + cosh(1)])
"""
a = func.ap
b = func.bq
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
# generalized hypergeometric differential equation
r1 = 1
for i in range(len(a)):
r1 = r1 * (x * Dx + a[i])
r2 = Dx
for i in range(len(b)):
r2 = r2 * (x * Dx + b[i] - 1)
sol = r1 - r2
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
# return None if it is Infinite or NaN
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# if the function is known symbolically
if not isinstance(simp, hyper):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
# if values don't exist at 0, then try to find initial
# conditions at 1. If it doesn't exist at 1 too then
# try 2 and so on.
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, hyper):
x0 = 1
        # use evalf if the function can't be simplified
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
"""
Converts a Meijer G-function to Holonomic.
``func`` is the G-Function and ``x0`` is the point at
which initial conditions are required.
Examples
========
>>> from sympy.holonomic.holonomic import from_meijerg, DifferentialOperators
>>> from sympy import symbols, meijerg, S
>>> x = symbols('x')
>>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1/sqrt(pi)])
"""
a = func.ap
b = func.bq
n = len(func.an)
m = len(func.bm)
p = len(a)
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
# compute the differential equation satisfied by the
# Meijer G-function.
mnp = (-1)**(m + n - p)
r1 = x * mnp
for i in range(len(a)):
r1 *= x * Dx + 1 - a[i]
r2 = 1
for i in range(len(b)):
r2 *= x * Dx - b[i]
sol = r1 - r2
if not initcond:
return HolonomicFunction(sol, x).composition(z)
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# computing initial conditions
if not isinstance(simp, meijerg):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, meijerg):
x0 = 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
x_1 = Dummy('x_1')
_lookup_table = None
domain_for_table = None
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
Converts a function or an expression to a holonomic function.
Parameters
==========
func:
The expression to be converted.
x:
variable for the function.
x0:
point at which initial condition must be computed.
y0:
One can optionally provide initial condition if the method
isn't able to do it automatically.
lenics:
Number of terms in the initial condition. By default it is
equal to the order of the annihilator.
domain:
Ground domain for the polynomials in `x` appearing as coefficients
in the annihilator.
initcond:
Set it false if you don't want the initial conditions to be computed.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1])
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)*Dx, x, 0, [1])
See Also
========
meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
            x = syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
    if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
elif domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
# iterate though the expression recursively
args = func.args
f = func.func
from sympy.core import Add, Mul, Pow
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
    Normalize a given annihilator: clear denominators by multiplying with the
    (optionally negated) lcm of the coefficient denominators, then divide out
    the gcd of the numerators.
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S(1))
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.rep))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.rep)
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).rep)
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.rep)
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).rep)
return DifferentialOperator(list_of_coeff, parent)
def _derivate_diff_eq(listofpoly):
"""
    Let a0(x)y(x) + a1(x)y'(x) + ... = 0 be a differential equation, where
    a0, a1, ... are polynomials or rational functions. This function returns
    b0, b1, b2, ... such that differentiating the former equation gives the
    differential equation b0(x)y(x) + b1(x)y'(x) + ... = 0.
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0]))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j) + listofpoly[i])
sol.append(listofpoly[a])
return sol
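# Concretely, for coefficients a_0, ..., a_n the returned list is
# b_0 = a_0', b_i = a_i' + a_(i-1) for 1 <= i <= n, and b_(n+1) = a_n,
# which is what differentiating sum(a_i(x) * y^(i)(x)) = 0 term by term gives.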
def _hyper_to_meijerg(func):
"""
Converts a `hyper` to meijerg.
"""
ap = func.ap
bq = func.bq
p = len(ap)
q = len(bq)
ispoly = any(i <= 0 and int(i) == i for i in ap)
if ispoly:
return hyperexpand(func)
z = func.args[2]
    # parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S(0), )
bmq = (1 - i for i in bq)
k = S(1)
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
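# This relies on the standard identity (with the sign convention used above)
# hyper(a, b, z) = (prod(gamma(b_j)) / prod(gamma(a_i)))
#                  * meijerg((1 - a, ()), ((0,), 1 - b), -z),
# which holds when no a_i is a non-positive integer; that polynomial case is
# handled by hyperexpand above.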
def _add_lists(list1, list2):
"""Takes polynomial sequences of two annihilators a and b and returns
the list of polynomials of sum of a and b.
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
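# For example, _add_lists([p0, p1], [q0, q1, q2]) gives [p0 + q0, p1 + q1, q2]:
# coefficients are added pairwise and the tail of the longer list is kept.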
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
x = Holonomic.x
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for i, j in enumerate(annihilator.listofpoly):
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.rep))
if len(y0) < a or n <= len(y0):
return y0
else:
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
if len(y0) > a:
y1 = [y0[i] for i in range(a)]
else:
y1 = [i for i in y0]
for i in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
try:
if not r.is_finite:
return y0
except AttributeError:
pass
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red)
return y0 + y1[len(y0):]
def DMFdiff(frac):
# differentiate a DMF object represented as p/q
if not isinstance(frac, DMF):
return frac.diff()
K = frac.ring
p = K.numer(frac)
q = K.denom(frac)
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.rep, sol_denom.rep))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 in DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S(0)
sol_q = S(0)
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
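# The DMF reps store coefficients from the highest degree down, hence the
# reversed() above when evaluating p(x0) and q(x0) as plain power sums; with
# mpm=True the coefficients are first converted to mpmath numbers at mp.prec.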
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""
Converts polynomials, rationals and algebraic functions to holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
        if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = list(reversed(rep))[i:]
indicial = i
break
for i, j in enumerate(coeff):
if isinstance(j, (PolyElement, FracElement)):
coeff[i] = j.as_expr()
y0 = {indicial: S(coeff)}
elif israt:
order = 1
p, q = func.as_numer_denom()
# differential equation satisfied by rational
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
sol = n * (x / m) * Dx - 1
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
        if y0 is None and x0 == 0 and is_singular and \
            (lenics is None or lenics <= 1):
rep = R.from_sympy(basepoly).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
if isinstance(j, (PolyElement, FracElement)):
j = j.as_expr()
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
if isinstance(coeff, (PolyElement, FracElement)):
coeff = coeff.as_expr()
y0 = {indicial: S([coeff])}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] is x else S(0)
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] is x else S(0)
r = s / b
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=[]):
"""
Adds a formula in the dictionary
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
from sympy import (sin, cos, exp, log, erf, sqrt, pi,
sinh, cosh, sinc, erfc, Si, Ci, Shi, erfi)
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order):
y0 = []
for i in range(order):
val = func.subs(x, x0)
if isinstance(val, NaN):
val = limit(func, x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
| bsd-3-clause |
ecell/ecell4 | ecell4/extra/azure_batch.py | 1 | 32998 | # azure_batch.py
# Copyright (c) 2017 Kazunari Kaizu
# Released under the GNU General Public License
# python_tutorial_client.py
# Copyright (c) 2017 Microsoft Corporation
# Released under the MIT license
from __future__ import print_function
import datetime
import os
import sys
import time
import binascii
import pickle
import textwrap
import inspect
import itertools
import re
import io
from logging import getLogger
_log = getLogger(__name__)
try:
import azure.storage.blob as azureblob
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batchauth
import azure.batch.models as batchmodels
except ImportError as e:
_log.error(
"No module named 'azure' was found."
" Install it with 'pip install azure'")
raise e
_STANDARD_OUT_FILE_NAME = 'stdout.txt'
_STANDARD_ERROR_FILE_NAME = 'stderr.txt'
_SAMPLES_CONFIG_FILE_NAME = 'configuration.cfg'
try:
import configparser
except ImportError:
import ConfigParser as configparser
def print_batch_exception(batch_exception):
"""Prints the contents of the specified Batch exception.
    :param batch_exception: The `BatchErrorException` whose details are printed.
"""
_log.error('-------------------------------------------')
_log.error('Exception encountered:')
if batch_exception.error and \
batch_exception.error.message and \
batch_exception.error.message.value:
_log.error(batch_exception.error.message.value)
if batch_exception.error.values:
_log.error('')
for mesg in batch_exception.error.values:
_log.error('{}:\t{}'.format(mesg.key, mesg.value))
_log.error('-------------------------------------------')
def upload_file_to_container(block_blob_client, container_name, file_path):
"""Uploads a local file to an Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param str file_path: The local path to the file.
:rtype: `azure.batch.models.ResourceFile`
:return: A ResourceFile initialized with a SAS URL appropriate for Batch
tasks.
"""
blob_name = os.path.basename(file_path)
_log.info('Uploading file {} to container [{}]...'.format(file_path, container_name))
block_blob_client.create_blob_from_path(container_name,
blob_name,
file_path)
sas_token = block_blob_client.generate_blob_shared_access_signature(
container_name,
blob_name,
permission=azureblob.BlobPermissions.READ,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
sas_url = block_blob_client.make_blob_url(container_name,
blob_name,
sas_token=sas_token)
return batchmodels.ResourceFile(http_url=sas_url, file_path=blob_name)
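# The returned ResourceFile tells the Batch service to stage the blob (via the
# SAS URL) onto a compute node under the given file_path before any task that
# references it starts.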
def get_container_sas_token(block_blob_client,
container_name, blob_permissions):
"""Obtains a shared access signature granting the specified permissions to the
container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param str container_name: The name of the Azure Blob storage container.
:param BlobPermissions blob_permissions:
:rtype: str
:return: A SAS token granting the specified permissions to the container.
"""
# Obtain the SAS token for the container, setting the expiry time and
# permissions. In this case, no start time is specified, so the shared
# access signature becomes valid immediately.
container_sas_token = \
block_blob_client.generate_container_shared_access_signature(
container_name,
permission=blob_permissions,
expiry=datetime.datetime.utcnow() + datetime.timedelta(hours=2))
return container_sas_token
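# Illustrative usage (names are placeholders): the token lets a task build a
# write-only client for the output container, mirroring what the generated
# task script does below.
#
#     sas = get_container_sas_token(blob_client, 'output',
#                                   azureblob.BlobPermissions.WRITE)
#     writer = azureblob.BlockBlobService(account_name='mystorageaccount',
#                                         sas_token=sas)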
def select_latest_verified_vm_image_with_node_agent_sku(
batch_client, publisher, offer, sku_starts_with):
"""Select the latest verified image that Azure Batch supports given
a publisher, offer and sku (starts with filter).
Originally in azure-batch-samples.Python.Batch.common.helpers
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str publisher: vm image publisher
:param str offer: vm image offer
:param str sku_starts_with: vm sku starts with filter
:rtype: tuple
:return: (node agent sku id to use, vm image ref to use)
"""
# get verified vm image list and node agent sku ids from service
node_agent_skus = batch_client.account.list_node_agent_skus()
# pick the latest supported sku
skus_to_use = [
(sku, image_ref) for sku in node_agent_skus for image_ref in sorted(
sku.verified_image_references, key=lambda item: item.sku)
if image_ref.publisher.lower() == publisher.lower() and
image_ref.offer.lower() == offer.lower() and
image_ref.sku.startswith(sku_starts_with)
]
# skus are listed in reverse order, pick first for latest
sku_to_use, image_ref_to_use = skus_to_use[0]
return (sku_to_use.id, image_ref_to_use)
def wrap_commands_in_shell(ostype, commands):
"""Wrap commands in a shell
Originally in azure-batch-samples.Python.Batch.common.helpers
:param list commands: list of commands to wrap
:param str ostype: OS type, linux or windows
:rtype: str
:return: a shell wrapping commands
"""
if ostype.lower() == 'linux':
return '/bin/bash -c \'set -e; set -o pipefail; {}; wait\''.format(
';'.join(commands))
elif ostype.lower() == 'windows':
return 'cmd.exe /c "{}"'.format('&'.join(commands))
else:
raise ValueError('unknown ostype: {}'.format(ostype))
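# For example:
#     wrap_commands_in_shell('linux', ['cd /tmp', 'ls'])
# returns "/bin/bash -c 'set -e; set -o pipefail; cd /tmp;ls; wait'"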
def create_pool(batch_service_client, pool_id,
resource_files, publisher, offer, sku,
task_file, vm_size, node_count):
"""Creates a pool of compute nodes with the specified OS settings.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str pool_id: An ID for the new pool.
:param list resource_files: A collection of resource files for the pool's
start task.
:param str publisher: Marketplace image publisher
:param str offer: Marketplace image offer
:param str sku: Marketplace image sku
:param str task_file: A file name of the script
:param str vm_size: A type of vm
:param str node_count: The number of nodes
"""
_log.info('Creating pool [{}]...'.format(pool_id))
# Create a new pool of Linux compute nodes using an Azure Virtual Machines
# Marketplace image. For more information about creating pools of Linux
# nodes, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
# Specify the commands for the pool's start task. The start task is run
# on each node as it joins the pool, and when it's rebooted or re-imaged.
# We use the start task to prep the node for running our task script.
task_commands = [
# Copy the python_tutorial_task.py script to the "shared" directory
# that all tasks that run on the node have access to. Note that
# we are using the -p flag with cp to preserve the file uid/gid,
# otherwise since this start task is run as an admin, it would not
# be accessible by tasks run as a non-admin user.
'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(os.path.basename(task_file)),
# Install pip
'curl -fSsL https://bootstrap.pypa.io/get-pip.py | python',
# Install the azure-storage module so that the task script can access
# Azure Blob storage, pre-cryptography version
'pip install azure-storage==0.32.0',
# Install E-Cell 4
'pip install https://1028-6348303-gh.circle-artifacts.com/0/root/circle/wheelhouse/ecell-4.1.2-cp27-cp27mu-manylinux1_x86_64.whl']
# Get the node agent SKU and image reference for the virtual machine
# configuration.
# For more information about the virtual machine configuration, see:
# https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
sku_to_use, image_ref_to_use = \
select_latest_verified_vm_image_with_node_agent_sku(
batch_service_client, publisher, offer, sku)
user = batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin)
new_pool = batch.models.PoolAddParameter(
id=pool_id,
virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use),
vm_size=vm_size,
target_dedicated_nodes=0,
target_low_priority_nodes=node_count,
start_task=batch.models.StartTask(
command_line=wrap_commands_in_shell('linux', task_commands),
user_identity=batchmodels.UserIdentity(auto_user=user),
wait_for_success=True,
resource_files=resource_files),
)
try:
batch_service_client.pool.add(new_pool)
except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise
def create_job(batch_service_client, job_id, pool_id):
"""Creates a job with the specified ID, associated with the specified pool.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID for the job.
:param str pool_id: The ID for the pool.
"""
print('Creating job [{}]...'.format(job_id))
job = batch.models.JobAddParameter(
id=job_id,
pool_info=batch.models.PoolInformation(pool_id=pool_id))
try:
batch_service_client.job.add(job)
    except batchmodels.BatchErrorException as err:
print_batch_exception(err)
raise
def add_tasks(batch_service_client, job_id, loads,
output_container_name, output_container_sas_token,
              task_file, account_name):
"""Adds a task for each input file in the collection to the specified job.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
:param str job_id: The ID of the job to which to add the tasks.
    :param list loads: A list of (input_file, output_file, i, j) tuples; one
    task is created for each tuple.
:param output_container_name: The ID of an Azure Blob storage container to
which the tasks will upload their results.
:param output_container_sas_token: A SAS token granting write access to
the specified Azure Blob storage container.
:param str task_file: A file name of the script
:param str account_name: A storage account
"""
_log.info('Adding {} tasks to job [{}]...'.format(len(loads), job_id))
# _log.info('Adding {} tasks to job [{}]...'.format(len(input_files), job_id))
tasks = list()
for (input_file, output_file, i, j) in loads:
command = ['python $AZ_BATCH_NODE_SHARED_DIR/{} '
'--filepath {} --output {} --storageaccount {} '
'--task_id {} --job_id {} '
'--storagecontainer {} --sastoken "{}"'.format(
os.path.basename(task_file),
input_file.file_path,
output_file,
                        account_name,
i, j,
output_container_name,
output_container_sas_token)]
_log.debug('CMD : "{}"'.format(command[0]))
tasks.append(batch.models.TaskAddParameter(
id='topNtask{}-{}'.format(i, j),
command_line=command,
resource_files=[input_file]
)
)
batch_service_client.task.add_collection(job_id, tasks)
task_ids = [task.id for task in tasks]
_log.info('{} tasks were added.'.format(len(task_ids)))
return task_ids
def wait_for_tasks_to_complete(batch_service_client, job_ids, timeout):
"""Returns when all tasks in the specified job reach the Completed state.
:param batch_service_client: A Batch service client.
:type batch_service_client: `azure.batch.BatchServiceClient`
    :param job_ids: A list of (job id, task ids) pairs whose tasks should be monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
timeout_expiration = datetime.datetime.now() + timeout
print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='')
while datetime.datetime.now() < timeout_expiration:
print('.', end='')
sys.stdout.flush()
# tasks = batch_service_client.task.list(job_id)
# incomplete_tasks = [task for task in tasks if
# task.state != batchmodels.TaskState.completed]
for (job_id, _) in job_ids:
tasks = batch_service_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if
task.state != batchmodels.TaskState.completed]
if incomplete_tasks:
break
if not incomplete_tasks:
print()
return True
else:
time.sleep(1)
raise RuntimeError("ERROR: Tasks did not reach 'Completed' state within "
"timeout period of " + str(timeout))
def download_blobs_from_container(block_blob_client,
container_name, directory_path,
prefix=None):
"""Downloads all blobs from the specified Azure Blob storage container.
:param block_blob_client: A blob service client.
:type block_blob_client: `azure.storage.blob.BlockBlobService`
:param container_name: The Azure Blob storage container from which to
download files.
:param directory_path: The local directory to which to download the files.
    :param str prefix: An optional name prefix used to filter blobs. Defaults to None.
"""
_log.info('Downloading all files from container [{}]...'.format(container_name))
    container_blobs = block_blob_client.list_blobs(container_name, prefix=prefix)
_log.info('{} blobs are found [{}]'.format(len(tuple(container_blobs)), ', '.join(blob.name for blob in container_blobs.items)))
for blob in container_blobs.items:
destination_file_path = os.path.join(directory_path, blob.name)
block_blob_client.get_blob_to_path(container_name,
blob.name,
destination_file_path)
_log.info(' Downloaded blob [{}] from container [{}] to {}'.format(
blob.name,
container_name,
destination_file_path))
_log.info(' Download complete!')
def _read_stream_as_string(stream, encoding):
"""Read stream as string
Originally in azure-batch-samples.Python.Batch.common.helpers
:param stream: input stream generator
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
output = io.BytesIO()
try:
for data in stream:
output.write(data)
if encoding is None:
encoding = 'utf-8'
return output.getvalue().decode(encoding)
finally:
output.close()
raise RuntimeError('could not write data to stream or decode bytes')
def read_task_file_as_string(
batch_client, job_id, task_id, file_name, encoding=None):
"""Reads the specified file as a string.
Originally in azure-batch-samples.Python.Batch.common.helpers
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job.
:param str task_id: The id of the task.
:param str file_name: The name of the file to read.
:param str encoding: The encoding of the file. The default is utf-8.
:return: The file content.
:rtype: str
"""
stream = batch_client.file.get_from_task(job_id, task_id, file_name)
return _read_stream_as_string(stream, encoding)
def print_task_output(batch_client, job_id, task_ids, encoding=None):
"""Prints the stdout and stderr for each task specified.
Originally in azure-batch-samples.Python.Batch.common.helpers
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param str job_id: The id of the job to monitor.
:param task_ids: The collection of tasks to print the output for.
:type task_ids: `list`
:param str encoding: The encoding to use when downloading the file.
"""
for task_id in task_ids:
file_text = read_task_file_as_string(
batch_client,
job_id,
task_id,
_STANDARD_OUT_FILE_NAME,
encoding)
print("{} content for task {}: ".format(
_STANDARD_OUT_FILE_NAME,
task_id))
print(file_text)
file_text = read_task_file_as_string(
batch_client,
job_id,
task_id,
_STANDARD_ERROR_FILE_NAME,
encoding)
print("{} content for task {}: ".format(
_STANDARD_ERROR_FILE_NAME,
task_id))
print(file_text)
def run_azure(target, jobs, n=1, path='.', delete=True, config=None):
"""Execute a function for multiple sets of arguments on Microsoft Azure,
and return the results as a list.
:param function target: A target function.
:param list jobs: A list of sets of arguments given to the target.
:param int n: The number of repeats running the target. 1 as default.
:param str path: A path to save temp files. The current path as default.
:param bool delete: Delete temp files after finishing jobs, or not. True as default.
:param config: str or configparser.ConfigParser. A config file. An example is the following:
```
[azure]
batch.name = foo
batch.key = bar
batch.url = hoge
storage.name = fuga
storage.key = spam
pool.nodecount = 2
# pool.id = MyPool
# pool.vmsize = Standard_D11_v2
# os.publisher = Canonical
# os.offer = UbuntuServer
# os.sku = 16
# job.id = MyJob
```
:return: A list of results corresponding the `jobs` list.
:rtype: list
"""
if config is None:
raise ValueError('Argument \'config\' must be given.')
elif isinstance(config, str):
if not os.path.isfile(config):
raise FileNotFoundError('A file [{}] could not be found.'.format(config))
config_filename = config
config = configparser.ConfigParser()
config.sections()
config.read(config_filename)
config.sections()
elif not isinstance(config, configparser.ConfigParser):
        raise ValueError('\'config\' must be either str or ConfigParser. [{}] was given.'.format(repr(config)))
if 'azure' not in config:
raise KeyError('Key \'azure\' could not be found in the given config.')
for key in ('batch.name', 'batch.key', 'batch.url', 'storage.name', 'storage.key', 'pool.nodecount'):
if key not in config['azure']:
raise KeyError('Key \'{}\' could not be found in the \'azure\' section.'.format(key))
# Update the Batch and Storage account credential strings below with the values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
_BATCH_ACCOUNT_NAME = config['azure']['batch.name']
_BATCH_ACCOUNT_KEY = config['azure']['batch.key']
_BATCH_ACCOUNT_URL = config['azure']['batch.url']
_STORAGE_ACCOUNT_NAME = config['azure']['storage.name']
_STORAGE_ACCOUNT_KEY = config['azure']['storage.key']
_POOL_NODE_COUNT = config['azure']['pool.nodecount']
_POOL_ID = config['azure'].get('pool.id', 'MyPool')
_POOL_VM_SIZE = config['azure'].get('pool.vmsize', 'Standard_D11_v2')
_NODE_OS_PUBLISHER = config['azure'].get('os.publisher', 'Canonical')
_NODE_OS_OFFER = config['azure'].get('os.offer', 'UbuntuServer')
_NODE_OS_SKU = config['azure'].get('os.sku', '16')
_JOB_ID = config['azure'].get('job.id', 'MyJob')
if not _POOL_NODE_COUNT.isdigit():
        raise ValueError('An invalid pool node count was given [{}]; it must be an integer'.format(_POOL_NODE_COUNT))
proc_per_node = 2 #XXX: Does this depend on pool vm?
nproc = int(_POOL_NODE_COUNT) * proc_per_node
code_header = """
from __future__ import print_function
import argparse
import os
import string
import azure.storage.blob as azureblob
parser = argparse.ArgumentParser()
parser.add_argument('--filepath', required=True,
help='The path to the text file to process. The path'
'may include a compute node\\'s environment'
'variables, such as'
'$AZ_BATCH_NODE_SHARED_DIR/filename.txt')
parser.add_argument('--output', required=True,
help='The path to the output.')
parser.add_argument('--job_id', type=int, required=True)
parser.add_argument('--task_id', type=int, required=True)
parser.add_argument('--storageaccount', required=True,
help='The name the Azure Storage account that owns the'
'blob storage container to which to upload'
'results.')
parser.add_argument('--storagecontainer', required=True,
help='The Azure Blob storage container to which to'
'upload results.')
parser.add_argument('--sastoken', required=True,
help='The SAS token providing write access to the'
'Storage container.')
args = parser.parse_args()
input_file = os.path.realpath(args.filepath)
output_file = args.output
import pickle
with open(input_file, mode='rb') as fin:
inputs = pickle.load(fin)
"""
code_footer = """
with open(output_file, mode='wb') as fout:
pickle.dump(res, fout, protocol=2)
# Create the blob client using the container's SAS token.
# This allows us to create a client that provides write
# access only to the container.
blob_client = azureblob.BlockBlobService(account_name=args.storageaccount,
sas_token=args.sastoken)
output_file_path = os.path.realpath(output_file)
blob_client.create_blob_from_path(args.storagecontainer,
output_file,
output_file_path)
"""
# src = textwrap.dedent(inspect.getsource(target)).replace(r'"', r'\"')
src = textwrap.dedent(inspect.getsource(target))
    if re.match(r'[\s\t]+', src.split('\n')[0]) is not None:
raise RuntimeError(
"Wrong indentation was found in the source translated")
code = code_header
code += src
code += 'res = {}(inputs, args.task_id, args.job_id)'.format(target.__name__)
code += code_footer
target = code
suffix = binascii.hexlify(os.urandom(4)).decode()
start_time = datetime.datetime.now().replace(microsecond=0)
_log.info('Sample start: {}'.format(start_time))
if not os.path.isdir(path):
os.mkdir(path)
# task_file = target
# task_file = 'task-{}.py'.format(suffix)
task_file = '{}/task-{}.py'.format(path, suffix)
with open(task_file, 'w') as fout:
fout.write(target)
# Prepare input pickle files
input_file_names = []
output_file_names = []
for i, job in enumerate(jobs):
filename = '{}/input-{}_{}.pickle'.format(path, suffix, i)
input_file_names.append(filename)
for j in range(n):
output_file_names.append('output-{}_{}.{}.pickle'.format(suffix, i, j + 1))
with open(filename, mode='wb') as fout:
pickle.dump(job, fout, protocol=2)
# Create the blob client, for use in obtaining references to
# blob storage containers and uploading files to containers.
blob_client = azureblob.BlockBlobService(
account_name=_STORAGE_ACCOUNT_NAME,
account_key=_STORAGE_ACCOUNT_KEY)
n_jobs = -(-(len(jobs) * n) // nproc) # ceil for int
_log.info('{} jobs will be created.'.format(n_jobs))
res = None
try:
# Use the blob client to create the containers in Azure Storage if they
# don't yet exist.
app_container_name = 'application-{}'.format(suffix)
input_container_name = 'input-{}'.format(suffix)
output_container_name = 'output-{}'.format(suffix)
# app_container_name = 'application'
# input_container_name = 'input'
# output_container_name = 'output'
blob_client.create_container(app_container_name, fail_on_exist=False)
blob_client.create_container(input_container_name, fail_on_exist=False)
blob_client.create_container(output_container_name, fail_on_exist=False)
# Paths to the task script. This script will be executed by the tasks that
# run on the compute nodes.
application_file_paths = [os.path.realpath(task_file)]
# The collection of data files that are to be processed by the tasks.
input_file_paths = [os.path.realpath(filename) for filename in input_file_names]
# Upload the application script to Azure Storage. This is the script that
# will process the data files, and is executed by each of the tasks on the
# compute nodes.
application_files = [
upload_file_to_container(blob_client, app_container_name, file_path)
for file_path in application_file_paths]
# Upload the data files. This is the data that will be processed by each of
# the tasks executed on the compute nodes in the pool.
input_files = [
upload_file_to_container(blob_client, input_container_name, file_path)
for file_path in input_file_paths]
# Obtain a shared access signature that provides write access to the output
# container to which the tasks will upload their output.
output_container_sas_token = get_container_sas_token(
blob_client,
output_container_name,
azureblob.BlobPermissions.WRITE)
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage
credentials = batchauth.SharedKeyCredentials(_BATCH_ACCOUNT_NAME,
_BATCH_ACCOUNT_KEY)
#print(_BATCH_ACCOUNT_URL)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=_BATCH_ACCOUNT_URL)
# Create the pool that will contain the compute nodes that will execute the
# tasks. The resource files we pass in are used for configuring the pool's
# start task, which is executed each time a node first joins the pool (or
# is rebooted or re-imaged).
create_pool(batch_client,
_POOL_ID + '-' + suffix,
application_files,
_NODE_OS_PUBLISHER,
_NODE_OS_OFFER,
_NODE_OS_SKU,
task_file,
_POOL_VM_SIZE, _POOL_NODE_COUNT)
# Create the job that will run the tasks.
loads = []
for i, input_file in enumerate(input_files):
for j, output_file in enumerate(output_file_names[i * n: (i + 1) * n]):
loads.append((input_file, output_file, i + 1, j + 1))
assert n_jobs == -(-len(loads) // nproc) # ceil for int
job_names = []
for i in range(n_jobs):
job_name = '{}-{}-{}'.format(_JOB_ID, suffix, i + 1)
create_job(batch_client, job_name, _POOL_ID + '-' + suffix)
# Add the tasks to the job. We need to supply a container shared access
# signature (SAS) token for the tasks so that they can upload their output
# to Azure Storage.
task_ids = add_tasks(batch_client,
job_name,
loads[i * nproc: (i + 1) * nproc],
output_container_name,
output_container_sas_token,
task_file,
_STORAGE_ACCOUNT_NAME)
job_names.append((job_name, task_ids))
# Pause execution until tasks reach Completed state.
wait_for_tasks_to_complete(batch_client,
job_names,
datetime.timedelta(minutes=20))
_log.info(" Success! All tasks reached the 'Completed' state within the specified timeout period.")
# Download the task output files from the output Storage container to a
# local directory. Note that we could have also downloaded the output
# files directly from the compute nodes themselves.
download_blobs_from_container(blob_client,
output_container_name,
os.path.abspath(path))
for job_id, task_ids in job_names:
print_task_output(batch_client, job_id, task_ids)
# Print out some timing info
end_time = datetime.datetime.now().replace(microsecond=0)
_log.info('Sample end: {}'.format(end_time))
_log.info('Elapsed time: {}'.format(end_time - start_time))
res = []
for output_file in output_file_names:
with open(os.path.join(path, output_file), mode='rb') as fin:
res.append(pickle.load(fin))
res = [res[i * n: (i + 1) * n] for i in range(len(jobs))]
finally:
# Clean up storage resources
_log.info('Deleting containers...')
blob_client.delete_container(app_container_name)
blob_client.delete_container(input_container_name)
blob_client.delete_container(output_container_name)
# Clean up Batch resources (if the user so chooses).
for i in range(n_jobs):
job_name = '{}-{}-{}'.format(_JOB_ID, suffix, i + 1)
_log.info('Deleting job [{}] ...'.format(job_name))
batch_client.job.delete(job_name)
_log.info('Deleting pool...')
batch_client.pool.delete(_POOL_ID + '-' + suffix)
if delete:
_log.info('Deleting temporary files...')
for filename in output_file_names:
filename = os.path.join(path, filename)
if os.path.isfile(filename):
os.remove(filename)
for filename in itertools.chain(input_file_paths, application_file_paths):
if os.path.isfile(filename):
os.remove(filename)
return res
def singlerun(job, task_id=0, job_id=0):
"""This task is for an example."""
import ecell4_base
import ecell4
import ecell4.util.simulation
import ecell4.util.decorator
print('ecell4_base.__version__ = {:s}'.format(ecell4_base.__version__))
print('ecell4.__version__ = {:s}'.format(ecell4.__version__))
print('job={}, task_id={}, job_id={}'.format(str(job), task_id, job_id))
with ecell4.util.decorator.reaction_rules():
A + B == C | (0.01, 0.3)
res = ecell4.util.simulation.run_simulation(
1.0,
y0={'A': job[0], 'B': job[1], 'C': job[2]},
rndseed=job_id,
solver='gillespie',
return_type='array')
print('A simulation was successfully done.')
return res
if __name__ == '__main__':
from logging import basicConfig, StreamHandler, DEBUG
# basicConfig(level=DEBUG)
handler = StreamHandler()
handler.setLevel(DEBUG)
getLogger(__name__).setLevel(DEBUG)
getLogger(__name__).addHandler(handler)
# jobs = [(n, n, n) for n in range(10, 70, 10)]
jobs = [(30, 30, 30), (60, 60, 60)]
res = run_azure(singlerun, jobs, n=2, path='.', config='example.ini')
print(res)
import numpy
import matplotlib.pylab as plt
for i, dataset in enumerate(res):
for j, data in enumerate(dataset):
data = numpy.array(data).T
plt.plot(data[0], data[3], '-', label='task{}-{}'.format(i, j))
plt.xlabel('Time')
plt.ylabel('# of Molecules')
# plt.legend(loc='best')
# plt.savefig('res.png')
plt.show()
| gpl-3.0 |
equialgo/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 3 | 50305 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y, coef, intercept, average_coef and average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
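# A rough sketch (plain numpy, no estimator state) of the +1/-1 label
# encoding that _prepare_fit_binary builds for the one-vs-all scheme:
# class i becomes the positive class, every other class the negative one.
#
#   >>> import numpy as np
#   >>> y = np.array([0, 1, 2, 1, 0])
#   >>> classes = np.unique(y)      # array([0, 1, 2])
#   >>> i = 1                       # treat class 1 as the "positive" class
#   >>> y_i = np.ones(y.shape, dtype=np.float64)
#   >>> y_i[y != classes[i]] = -1.0
#   >>> y_i
#   array([-1.,  1., -1.,  1., -1.])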
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
    # if average is not true, average_coef and average_intercept will be
    # unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
        # Clear iteration count for multiple calls to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
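            # The averaged weights are only used once at least ``average``
            # samples have been seen; ``t_`` counts samples processed so far
            # (it starts at 1, hence the ``- 1``).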
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
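    # Typical out-of-core usage of partial_fit (illustrative sketch only;
    # X_batch*/y_batch*/y_all are placeholders for streamed data): the full
    # label set must be passed on the first call, later calls may contain
    # any subset of those labels.
    #
    #   >>> clf = SGDClassifier()                            # doctest: +SKIP
    #   >>> clf.partial_fit(X_batch0, y_batch0,
    #   ...                 classes=np.unique(y_all))        # doctest: +SKIP
    #   >>> clf.partial_fit(X_batch1, y_batch1)              # doctest: +SKIP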
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning; see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared Euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001.
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
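            # Map decision values to probabilities in place on ``prob``:
            # (clip(score, -1, 1) + 1) / 2, as described in the docstring.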
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
            # Clear iteration count for multiple calls to fit.
self.t_ = 1.0
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
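            # As in the classifier: switch to the averaged weights only once
            # at least ``average`` samples have been seen (t_ - 1 samples).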
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using either the squared Euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001.
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
odejesush/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 3 | 42938 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import sys
import tempfile
# pylint: disable=g-bad-todo
# TODO(#6568): Remove this hack that makes dlopen() not crash.
# pylint: enable=g-bad-todo
# pylint: disable=g-import-not-at-top
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
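# The model functions above deliberately cover the signatures and return
# values tf.contrib.learn.Estimator accepts: a plain (predictions, loss,
# train_op) tuple, with or without an explicit ``mode`` argument (and
# optionally ``params``/``config``), or a full ``model_fn.ModelFnOps`` as in
# linear_model_fn_with_model_fn_ops.  The tests below exercise each variant.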
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffold(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTrainingDictionaryInput(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=50)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
# Check we can evaluate and predict.
scores2 = est2.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores2['MSE'], scores['MSE'])
predictions = np.array(list(est2.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions,
float64_target['labels'])
self.assertAllClose(other_score, scores['MSE'])
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testBostonAll(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=100)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(scores['MSE'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testBostonAllDictionaryInput(self):
boston = base.load_boston()
est = estimator.Estimator(model_fn=linear_model_fn)
boston_input = {'input': boston.data}
float64_target = {'labels': boston.target.astype(np.float64)}
est.fit(x=boston_input, y=float64_target, steps=100)
scores = est.evaluate(
x=boston_input,
y=float64_target,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
predictions = np.array(list(est.predict(x=boston_input)))
other_score = _sklearn.mean_squared_error(predictions, boston.target)
self.assertAllClose(other_score, scores['MSE'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisAll(self):
iris = base.load_iris()
est = estimator.SKCompat(
estimator.Estimator(model_fn=logistic_model_no_mode_fn))
est.fit(iris.data, iris.target, steps=100)
scores = est.score(
x=iris.data,
y=iris.target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = est.predict(x=iris.data)
predictions_class = est.predict(x=iris.data, outputs=['class'])['class']
self.assertEqual(predictions['prob'].shape[0], iris.target.shape[0])
self.assertAllClose(predictions['class'], predictions_class)
self.assertAllClose(
predictions['class'], np.argmax(
predictions['prob'], axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions['class'])
self.assertAllClose(scores['accuracy'], other_score)
self.assertTrue('global_step' in scores)
self.assertEqual(100, scores['global_step'])
def testIrisAllDictionaryInput(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
iris_data = {'input': iris.data}
iris_target = {'labels': iris.target}
est.fit(iris_data, iris_target, steps=100)
scores = est.evaluate(
x=iris_data,
y=iris_target,
metrics={('accuracy', 'class'): metric_ops.streaming_accuracy})
predictions = list(est.predict(x=iris_data))
predictions_class = list(est.predict(x=iris_data, outputs=['class']))
self.assertEqual(len(predictions), iris.target.shape[0])
classes_batch = np.array([p['class'] for p in predictions])
self.assertAllClose(classes_batch,
np.array([p['class'] for p in predictions_class]))
self.assertAllClose(
classes_batch,
np.argmax(
np.array([p['prob'] for p in predictions]), axis=1))
other_score = _sklearn.accuracy_score(iris.target, classes_batch)
self.assertAllClose(other_score, scores['accuracy'])
self.assertTrue('global_step' in scores)
self.assertEqual(scores['global_step'], 100)
def testIrisInputFn(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisInputFnLabelsDict(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
est.fit(input_fn=iris_input_fn_labels_dict, steps=100)
_ = est.evaluate(
input_fn=iris_input_fn_labels_dict,
steps=1,
metrics={
'accuracy':
metric_spec.MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='class',
label_key='labels')
})
predictions = list(est.predict(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_eval_fn, steps=1)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testPredictInputFnWithQueue(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn_with_queue, num_epochs=2)
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0] * 2)
def testPredictConstInputFn(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
def input_fn():
features = array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
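# Illustrative sketch, not registered as a test: the fit -> evaluate -> predict
# round trip that the tests above exercise, reusing the linear_model_fn,
# boston_input_fn and boston_eval_fn helpers defined earlier in this file.
# The helper name below is ours, not part of the library.
def _example_estimator_round_trip():
  est = estimator.Estimator(model_fn=linear_model_fn)
  est.fit(input_fn=boston_input_fn, steps=1)
  scores = est.evaluate(input_fn=boston_eval_fn, steps=1)
  predict_input_fn = functools.partial(boston_input_fn, num_epochs=1)
  predictions = list(est.predict(input_fn=predict_input_fn))
  return scores, predictions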
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/arrays/categorical/test_dtypes.py | 3 | 6796 | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pandas.compat import long
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import Categorical, CategoricalIndex, Index, Series, Timestamp
import pandas.util.testing as tm
class TestCategoricalDtypes(object):
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
assert c1.is_dtype_equal(c1)
assert c2.is_dtype_equal(c2)
assert c3.is_dtype_equal(c3)
assert c1.is_dtype_equal(c2)
assert not c1.is_dtype_equal(c3)
assert not c1.is_dtype_equal(Index(list('aabca')))
assert not c1.is_dtype_equal(c1.astype(object))
assert c1.is_dtype_equal(CategoricalIndex(c1))
assert (c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True))
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1.is_dtype_equal(s1)
assert c2.is_dtype_equal(s2)
assert c3.is_dtype_equal(s3)
assert c1.is_dtype_equal(s2)
assert not c1.is_dtype_equal(s3)
assert not c1.is_dtype_equal(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(['a', 'b', 'c'])
result = c._set_dtype(CategoricalDtype(list('abcd')))
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list('abcd')))
@pytest.mark.parametrize('values, categories, new_categories', [
# No NaNs, same cats, same order
(['a', 'b', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['a', 'b', 'a'], ['a', 'b'], ['b', 'a'],),
# Same, unsorted
(['b', 'a', 'a'], ['a', 'b'], ['a', 'b'],),
# No NaNs, same cats, different order
(['b', 'a', 'a'], ['a', 'b'], ['b', 'a'],),
# NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a', 'b']),
(['a', 'b', 'c'], ['a', 'b'], ['b', 'a']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
(['b', 'a', 'c'], ['a', 'b'], ['a', 'b']),
# Introduce NaNs
(['a', 'b', 'c'], ['a', 'b'], ['a']),
(['a', 'b', 'c'], ['a', 'b'], ['b']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
(['b', 'a', 'c'], ['a', 'b'], ['a']),
# No overlap
(['a', 'b', 'c'], ['a', 'b'], ['d', 'e']),
])
@pytest.mark.parametrize('ordered', [True, False])
def test_set_dtype_many(self, values, categories, new_categories,
ordered):
c = Categorical(values, categories)
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
c = Categorical(['a', 'b', 'c'], ['d', 'e'])
result = c._set_dtype(CategoricalDtype(['a', 'b']))
expected = Categorical([None, None, None], categories=['a', 'b'])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = Categorical(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
result = Categorical(['foo%05d' % i for i in range(40000)])
assert result.codes.dtype == 'int32'
# adding cats
result = Categorical(['foo', 'bar', 'baz'])
assert result.codes.dtype == 'int8'
result = result.add_categories(['foo%05d' % i for i in range(400)])
assert result.codes.dtype == 'int16'
# removing cats
result = result.remove_categories(['foo%05d' % i for i in range(300)])
assert result.codes.dtype == 'int8'
@pytest.mark.parametrize('ordered', [True, False])
def test_astype(self, ordered):
# string
cat = Categorical(list('abbaaccc'), ordered=ordered)
result = cat.astype(object)
expected = np.array(cat)
tm.assert_numpy_array_equal(result, expected)
msg = 'could not convert string to float'
with pytest.raises(ValueError, match=msg):
cat.astype(float)
# numeric
cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered)
result = cat.astype(object)
expected = np.array(cat, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(int)
expected = np.array(cat, dtype=np.int)
tm.assert_numpy_array_equal(result, expected)
result = cat.astype(float)
expected = np.array(cat, dtype=np.float)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('dtype_ordered', [True, False])
@pytest.mark.parametrize('cat_ordered', [True, False])
def test_astype_category(self, dtype_ordered, cat_ordered):
# GH 10696/18593
data = list('abcaacbab')
cat = Categorical(data, categories=list('bac'), ordered=cat_ordered)
# standard categories
dtype = CategoricalDtype(ordered=dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(
data, categories=cat.categories, ordered=dtype_ordered)
tm.assert_categorical_equal(result, expected)
# non-standard categories
dtype = CategoricalDtype(list('adc'), dtype_ordered)
result = cat.astype(dtype)
expected = Categorical(data, dtype=dtype)
tm.assert_categorical_equal(result, expected)
if dtype_ordered is False:
# dtype='category' can't specify ordered, so only test once
result = cat.astype('category')
expected = cat
tm.assert_categorical_equal(result, expected)
def test_iter_python_types(self):
# GH-19909
# TODO(Py2): Remove long
cat = Categorical([1, 2])
assert isinstance(list(cat)[0], (int, long))
assert isinstance(cat.tolist()[0], (int, long))
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp('2017-01-01'),
Timestamp('2017-01-02')])
assert isinstance(list(cat)[0], Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
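# Illustrative sketch, not a test case: the public astype() analogue of the
# private Categorical._set_dtype behaviour covered above. Values missing from
# the target CategoricalDtype are recoded to NaN. The helper name is ours.
def _example_recode_with_astype():
    cat = Categorical(['a', 'b', 'c'])
    recoded = cat.astype(CategoricalDtype(['a', 'b']))
    # 'c' has no code in the new dtype, so the third element becomes NaN.
    assert recoded.tolist()[:2] == ['a', 'b']
    return recoded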
| bsd-3-clause |
flightgong/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 31 | 2633 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm """
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
"""Test AffinityPropagation.predict"""
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
"""Test exception in AffinityPropagation.predict"""
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
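# Illustrative sketch, not part of the test suite: minimal end-to-end use of
# AffinityPropagation on the blobs defined above. The helper name is ours; the
# number of clusters found is controlled indirectly by the preference value.
def _example_affinity_propagation_usage():
    af = AffinityPropagation().fit(X)
    # One exemplar index per discovered cluster and one label per sample.
    return af.cluster_centers_indices_, af.labels_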
| bsd-3-clause |
icexelloss/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_map.py | 4 | 20724 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import sys
from collections import OrderedDict
from decimal import Decimal
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.util.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa
"""
Tests below use pd.DataFrame.assign, which infers mixed (unicode/str) types for column
names from kwargs under Python 2, so check_column_type=False is set to skip that check.
"""
if sys.version < '3':
_check_column_type = False
else:
_check_column_type = True
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
values = [
1, 2, 3,
4, 5, 1.1,
2.2, Decimal(1.123),
[1, 2, 2], True, 'hello',
bytearray([0x01, 0x02])
]
output_fields = [
('id', IntegerType()), ('byte', ByteType()), ('short', ShortType()),
('int', IntegerType()), ('long', LongType()), ('float', FloatType()),
('double', DoubleType()), ('decim', DecimalType(10, 3)),
('array', ArrayType(IntegerType())), ('bool', BooleanType()), ('str', StringType()),
('bin', BinaryType())
]
output_schema = StructType([StructField(*x) for x in output_fields])
df = self.spark.createDataFrame([values], schema=output_schema)
# Different forms of group map pandas UDF, results of these are the same
udf1 = pandas_udf(
lambda pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(
id=key[0],
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
bin=pdf.bin
),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
def test_array_type_correct(self):
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_register_grouped_map_udf(self):
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ValueError,
'f.*SQL_BATCHED_UDF.*SQL_SCALAR_PANDAS_UDF.*SQL_GROUPED_AGG_PANDAS_UDF.*'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_coerce(self):
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_complex_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0, as_index=False).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_empty_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_datatype_string(self):
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
def test_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
common_err_msg = 'Invalid returnType.*grouped map Pandas UDF.*'
unsupported_types = [
StructField('map', MapType(StringType(), IntegerType())),
StructField('arr_ts', ArrayType(TimestampType())),
StructField('null', NullType()),
StructField('struct', StructType([StructField('l', LongType())])),
]
for unsupported_type in unsupported_types:
schema = StructType([StructField('id', LongType(), True), unsupported_type])
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, common_err_msg):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
assert_frame_equal(df.toPandas(), result.toPandas(), check_column_type=_check_column_type)
def test_udf_with_key(self):
import numpy as np
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
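        # (Concretely: np.int64(key[0]) * pdf.v keeps pdf.v's int32 dtype,
        # while pdf.v * pdf.id upcasts to int64 because both are Series.)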
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id', as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected1, result1, check_column_type=_check_column_type)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2, as_index=False)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected2, result2, check_column_type=_check_column_type)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2], as_index=False)\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected3, result3, check_column_type=_check_column_type)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
assert_frame_equal(expected4, result4, check_column_type=_check_column_type)
def test_column_order(self):
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id', as_index=False)
# Function returns a pdf with required column names, but order could be arbitrary using dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
            # but use from_items to ensure the pdf column order differs from the schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
assert_frame_equal(expected, result, check_column_type=_check_column_type)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
with self.assertRaisesRegexp(Exception, "an integer is required"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
with self.sql_conf({
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName": False}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
@pandas_udf('key long, col string', PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
col('temp0.key') == col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
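# Illustrative sketch, not executed by the test runner above: the canonical
# grouped-map pattern these tests exercise -- subtracting the per-group mean.
# `spark` is assumed to be an existing SparkSession; the names are ours.
def _example_subtract_group_mean(spark):
    df = spark.createDataFrame(
        [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)], ('id', 'v'))
    @pandas_udf('id long, v double', PandasUDFType.GROUPED_MAP)
    def subtract_mean(pdf):
        # pdf is a pandas.DataFrame containing all rows of one group.
        return pdf.assign(v=pdf.v - pdf.v.mean())
    return df.groupby('id').apply(subtract_mean)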
| apache-2.0 |
zrhans/pythonanywhere | .virtualenvs/django19/lib/python3.4/site-packages/matplotlib/sankey.py | 8 | 40828 | #!/usr/bin/env python
"""
Module for creating Sankey diagrams using matplotlib
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import zip
# Original version by Yannick Copin ([email protected]) 10/2/2010, available
# at:
# http://matplotlib.org/examples/api/sankey_demo_old.html
# Modifications by Kevin Davies ([email protected]) 6/3/2011:
# --Used arcs for the curves (so that the widths of the paths are uniform)
# --Converted the function to a class and created methods to join multiple
# simple Sankey diagrams
# --Provided handling for cases where the total of the inputs isn't 100
# Now, the default layout is based on the assumption that the inputs sum to
# 1. A scaling parameter can be used in other cases.
# --The call structure was changed to be more explicit about layout,
# including the length of the trunk, length of the paths, gap between the
# paths, and the margin around the diagram.
# --Allowed the lengths of paths to be adjusted individually, with an option
# to automatically justify them
# --The call structure was changed to make the specification of path
# orientation more flexible. Flows are passed through one array, with
# inputs being positive and outputs being negative. An orientation
# argument specifies the direction of the arrows. The "main"
# inputs/outputs are now specified via an orientation of 0, and there may
# be several of each.
# --Changed assertions to ValueError to catch common calling errors (by
# Francesco Montesano, [email protected])
# --Added the physical unit as a string argument to be used in the labels, so
# that the values of the flows can usually be applied automatically
# --Added an argument for a minimum magnitude below which flows are not shown
# --Added a tapered trunk in the case that the flows do not sum to 0
# --Allowed the diagram to be rotated
import numpy as np
from matplotlib.cbook import iterable, Bunch
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.transforms import Affine2D
from matplotlib import verbose
from matplotlib import docstring
__author__ = "Kevin L. Davies"
__credits__ = ["Yannick Copin"]
__license__ = "BSD"
__version__ = "2011/09/16"
# Angles [deg/90]
RIGHT = 0
UP = 1
# LEFT = 2
DOWN = 3
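# In this deg/90 convention RIGHT, UP and DOWN correspond to 0, 90 and 270
# degrees; LEFT (180 degrees, i.e. 2) is unused, hence commented out above.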
class Sankey(object):
"""
Sankey diagram in matplotlib
Sankey diagrams are a specific type of flow diagram, in which
the width of the arrows is shown proportionally to the flow
quantity. They are typically used to visualize energy or
material or cost transfers between processes.
`Wikipedia (6/1/2011) <http://en.wikipedia.org/wiki/Sankey_diagram>`_
"""
def __init__(self, ax=None, scale=1.0, unit='', format='%G', gap=0.25,
radius=0.1, shoulder=0.03, offset=0.15, head_angle=100,
margin=0.4, tolerance=1e-6, **kwargs):
"""
Create a new Sankey instance.
Optional keyword arguments:
=============== ===================================================
Field Description
=============== ===================================================
*ax* axes onto which the data should be plotted
If *ax* isn't provided, new axes will be created.
*scale* scaling factor for the flows
*scale* sizes the width of the paths in order to
maintain proper layout. The same scale is applied
to all subdiagrams. The value should be chosen
such that the product of the scale and the sum of
the inputs is approximately 1.0 (and the product of
the scale and the sum of the outputs is
approximately -1.0).
*unit* string representing the physical unit associated
with the flow quantities
If *unit* is None, then none of the quantities are
labeled.
*format* a Python number formatting string to be used in
labeling the flow as a quantity (i.e., a number
times a unit, where the unit is given)
*gap* space between paths that break in/break away
to/from the top or bottom
*radius* inner radius of the vertical paths
        *shoulder*      size of the shoulders of output arrows
*offset* text offset (from the dip or tip of the arrow)
*head_angle* angle of the arrow heads (and negative of the angle
of the tails) [deg]
*margin* minimum space between Sankey outlines and the edge
of the plot area
*tolerance* acceptable maximum of the magnitude of the sum of
flows
The magnitude of the sum of connected flows cannot
be greater than *tolerance*.
=============== ===================================================
The optional arguments listed above are applied to all subdiagrams so
that there is consistent alignment and formatting.
If :class:`Sankey` is instantiated with any keyword arguments other
than those explicitly listed above (``**kwargs``), they will be passed
to :meth:`add`, which will create the first subdiagram.
In order to draw a complex Sankey diagram, create an instance of
:class:`Sankey` by calling it without any kwargs::
sankey = Sankey()
Then add simple Sankey sub-diagrams::
sankey.add() # 1
sankey.add() # 2
#...
sankey.add() # n
Finally, create the full diagram::
sankey.finish()
Or, instead, simply daisy-chain those calls::
Sankey().add().add... .add().finish()
.. seealso::
:meth:`add`
:meth:`finish`
**Examples:**
.. plot:: mpl_examples/api/sankey_demo_basics.py
"""
# Check the arguments.
if gap < 0:
raise ValueError(
"The gap is negative.\nThis isn't allowed because it "
"would cause the paths to overlap.")
if radius > gap:
raise ValueError(
"The inner radius is greater than the path spacing.\n"
"This isn't allowed because it would cause the paths to overlap.")
if head_angle < 0:
raise ValueError(
"The angle is negative.\nThis isn't allowed "
"because it would cause inputs to look like "
"outputs and vice versa.")
if tolerance < 0:
raise ValueError(
"The tolerance is negative.\nIt must be a magnitude.")
# Create axes if necessary.
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
self.diagrams = []
# Store the inputs.
self.ax = ax
self.unit = unit
self.format = format
self.scale = scale
self.gap = gap
self.radius = radius
self.shoulder = shoulder
self.offset = offset
self.margin = margin
self.pitch = np.tan(np.pi * (1 - head_angle / 180.0) / 2.0)
self.tolerance = tolerance
# Initialize the vertices of tight box around the diagram(s).
self.extent = np.array((np.inf, -np.inf, np.inf, -np.inf))
# If there are any kwargs, create the first subdiagram.
if len(kwargs):
self.add(**kwargs)
def _arc(self, quadrant=0, cw=True, radius=1, center=(0, 0)):
"""
Return the codes and vertices for a rotated, scaled, and translated
90 degree arc.
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*quadrant* uses 0-based indexing (0, 1, 2, or 3)
*cw* if True, clockwise
*center* (x, y) tuple of the arc's center
=============== ==========================================
"""
# Note: It would be possible to use matplotlib's transforms to rotate,
# scale, and translate the arc, but since the angles are discrete,
# it's just as easy and maybe more efficient to do it here.
ARC_CODES = [Path.LINETO,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4,
Path.CURVE4]
# Vertices of a cubic Bezier curve approximating a 90 deg arc
# These can be determined by Path.arc(0,90).
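        # (They agree, up to floating-point rounding, with the control points
        # returned by Path.arc(0, 90).)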
ARC_VERTICES = np.array([[1.00000000e+00, 0.00000000e+00],
[1.00000000e+00, 2.65114773e-01],
[8.94571235e-01, 5.19642327e-01],
[7.07106781e-01, 7.07106781e-01],
[5.19642327e-01, 8.94571235e-01],
[2.65114773e-01, 1.00000000e+00],
# Insignificant
# [6.12303177e-17, 1.00000000e+00]])
[0.00000000e+00, 1.00000000e+00]])
if quadrant == 0 or quadrant == 2:
if cw:
vertices = ARC_VERTICES
else:
vertices = ARC_VERTICES[:, ::-1] # Swap x and y.
elif quadrant == 1 or quadrant == 3:
# Negate x.
if cw:
# Swap x and y.
vertices = np.column_stack((-ARC_VERTICES[:, 1],
ARC_VERTICES[:, 0]))
else:
vertices = np.column_stack((-ARC_VERTICES[:, 0],
ARC_VERTICES[:, 1]))
if quadrant > 1:
radius = -radius # Rotate 180 deg.
return list(zip(ARC_CODES, radius * vertices +
np.tile(center, (ARC_VERTICES.shape[0], 1))))
def _add_input(self, path, angle, flow, length):
"""
Add an input to a path and return its tip and label locations.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
dipdepth = (flow / 2) * self.pitch
if angle == RIGHT:
x -= length
dip = [x + dipdepth, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, dip),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x + self.gap, y + flow])])
label_location = [dip[0] - self.offset, dip[1]]
else: # Vertical
x -= self.gap
if angle == UP:
sign = 1
else:
sign = -1
dip = [x - flow / 2, y - sign * (length - dipdepth)]
if angle == DOWN:
quadrant = 2
else:
quadrant = 1
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x + self.radius,
y - sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y - sign * length]),
(Path.LINETO, dip),
(Path.LINETO, [x - flow, y - sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=flow + self.radius,
center=(x + self.radius,
y - sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [dip[0], dip[1] - sign * self.offset]
return dip, label_location
def _add_output(self, path, angle, flow, length):
"""
Append an output to a path and return its tip and label locations.
.. note:: *flow* is negative for an output.
"""
if angle is None:
return [0, 0], [0, 0]
else:
x, y = path[-1][1] # Use the last point as a reference.
tipheight = (self.shoulder - flow / 2) * self.pitch
if angle == RIGHT:
x += length
tip = [x + tipheight, y + flow / 2.0]
path.extend([(Path.LINETO, [x, y]),
(Path.LINETO, [x, y + self.shoulder]),
(Path.LINETO, tip),
(Path.LINETO, [x, y - self.shoulder + flow]),
(Path.LINETO, [x, y + flow]),
(Path.LINETO, [x - self.gap, y + flow])])
label_location = [tip[0] + self.offset, tip[1]]
else: # Vertical
x += self.gap
if angle == UP:
sign = 1
else:
sign = -1
tip = [x - flow / 2.0, y + sign * (length + tipheight)]
if angle == UP:
quadrant = 3
else:
quadrant = 0
# Inner arc isn't needed if inner radius is zero
if self.radius:
path.extend(self._arc(quadrant=quadrant,
cw=angle == UP,
radius=self.radius,
center=(x - self.radius,
y + sign * self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
"""
        A path cannot simply be reversed with path[::-1], because each code
        specifies an action to take from the **previous** point.
"""
reverse_path = []
next_code = first_action
for code, position in path[::-1]:
reverse_path.append((next_code, position))
next_code = code
return reverse_path
# This might be more efficient, but it fails because 'tuple' object
# doesn't support item assignment:
# path[1] = path[1][-1:0:-1]
# path[1][0] = first_action
# path[2] = path[2][::-1]
# return path
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Return value is the instance of :class:`Sankey`.
Optional keyword arguments:
=============== ===================================================
Keyword Description
=============== ===================================================
*patchlabel* label to be placed at the center of the diagram
Note: *label* (not *patchlabel*) will be passed to
the patch through ``**kwargs`` and can be used to
create an entry in the legend.
*flows* array of flow values
By convention, inputs are positive and outputs are
negative.
*orientations* list of orientations of the paths
Valid values are 1 (from/to the top), 0 (from/to
the left or right), or -1 (from/to the bottom). If
*orientations* == 0, inputs will break in from the
left and outputs will break away to the right.
*labels* list of specifications of the labels for the flows
Each value may be *None* (no labels), '' (just
label the quantities), or a labeling string. If a
single value is provided, it will be applied to all
flows. If an entry is a non-empty string, then the
quantity for the corresponding flow will be shown
below the string. However, if the *unit* of the
main diagram is None, then quantities are never
shown, regardless of the value of this argument.
*trunklength* length between the bases of the input and output
groups
*pathlengths* list of lengths of the arrows before break-in or
after break-away
If a single value is given, then it will be applied
to the first (inside) paths on the top and bottom,
and the length of all other arrows will be
justified accordingly. The *pathlengths* are not
applied to the horizontal inputs and outputs.
*prior* index of the prior diagram to which this diagram
should be connected
*connect* a (prior, this) tuple indexing the flow of the
prior diagram and the flow of this diagram which
should be connected
If this is the first diagram or *prior* is *None*,
*connect* will be ignored.
*rotation* angle of rotation of the diagram [deg]
*rotation* is ignored if this diagram is connected
to an existing one (using *prior* and *connect*).
The interpretation of the *orientations* argument
will be rotated accordingly (e.g., if *rotation*
== 90, an *orientations* entry of 1 means to/from
the left).
=============== ===================================================
Valid kwargs are :meth:`matplotlib.patches.PathPatch` arguments:
%(Patch)s
As examples, ``fill=False`` and ``label='A legend entry'``.
By default, ``facecolor='#bfd1d4'`` (light blue) and
``linewidth=0.5``.
The indexing parameters (*prior* and *connect*) are zero-based.
The flows are placed along the top of the diagram from the inside out
in order of their index within the *flows* list or array. They are
placed along the sides of the diagram from the top down and along the
bottom from the outside in.
If the sum of the inputs and outputs is nonzero, the discrepancy
will appear as a cubic Bezier curve along the top and bottom edges of
the trunk.
.. seealso::
:meth:`finish`
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = [0, 0]
if len(orientations) != n:
raise ValueError(
"orientations and flows must have the same length.\n"
"orientations has length %d, but flows has length %d."
% (len(orientations), n))
if labels != '' and getattr(labels, '__iter__', False):
# iterable() isn't used because it would give True if labels is a
# string
if len(labels) != n:
raise ValueError(
"If labels is a list, then labels and flows must have the "
"same length.\nlabels has length %d, but flows has length %d."
% (len(labels), n))
else:
labels = [labels] * n
if trunklength < 0:
raise ValueError(
"trunklength is negative.\nThis isn't allowed, because it would "
"cause poor layout.")
if np.absolute(np.sum(flows)) > self.tolerance:
verbose.report(
"The sum of the flows is nonzero (%f).\nIs the "
"system not at steady state?" % np.sum(flows), 'helpful')
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if not (0.5 <= gain <= 2.0):
verbose.report(
"The scaled sum of the inputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if not (-2.0 <= loss <= -0.5):
verbose.report(
"The scaled sum of the outputs is %f.\nThis may "
"cause poor layout.\nConsider changing the scale so"
" that the scaled sum is approximately 1.0." % gain, 'helpful')
if prior is not None:
if prior < 0:
raise ValueError("The index of the prior diagram is negative.")
if min(connect) < 0:
raise ValueError(
"At least one of the connection indices is negative.")
if prior >= len(self.diagrams):
raise ValueError(
"The index of the prior diagram is %d, but there are "
"only %d other diagrams.\nThe index is zero-based."
% (prior, len(self.diagrams)))
if connect[0] >= len(self.diagrams[prior].flows):
raise ValueError(
"The connection index to the source diagram is %d, but "
"that diagram has only %d flows.\nThe index is zero-based."
% (connect[0], len(self.diagrams[prior].flows)))
if connect[1] >= n:
raise ValueError(
"The connection index to this diagram is %d, but this diagram"
"has only %d flows.\n The index is zero-based."
% (connect[1], n))
if self.diagrams[prior].angles[connect[0]] is None:
raise ValueError(
"The connection cannot be made. Check that the magnitude "
"of flow %d of diagram %d is greater than or equal to the "
"specified tolerance." % (connect[0], prior))
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
if abs(flow_error) >= self.tolerance:
raise ValueError(
"The scaled sum of the connected flows is %f, which is not "
"within the tolerance (%f)." % (flow_error, self.tolerance))
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
verbose.report(
"The magnitude of flow %d (%f) is below the "
"tolerance (%f).\nIt will not be shown, and it "
"cannot be used in a connection."
% (i, flow, self.tolerance), 'helpful')
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
if orient != -1:
raise ValueError(
"The value of orientations[%d] is %d, "
"but it must be [ -1 | 0 | 1 ]." % (i, orient))
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if iterable(pathlengths):
if len(pathlengths) != n:
raise ValueError(
"If pathlengths is a list, then pathlengths and flows must "
"have the same length.\npathlengths has length %d, but flows "
"has length %d." % (len(pathlengths), n))
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = lrlength
lrlength -= flow
# Determine the lengths of the left-side arrows
# from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, zip(scaled_flows, pathlengths))))):
if angle == RIGHT:
if is_input:
if has_left_input:
pathlengths[n - i - 1] = 0
else:
has_left_input = True
# Determine the lengths of the right-side arrows
# from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT:
if not is_input:
if has_right_output:
pathlengths[i] = 0
else:
has_right_output = True
# Begin the subpaths, and smooth the transition if the sum of the flows
# is nonzero.
urpath = [(Path.MOVETO, [(self.gap - trunklength / 2.0), # Upper right
gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
gain / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
gain / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
-loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap),
-loss / 2.0])]
llpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower left
loss / 2.0]),
(Path.LINETO, [(trunklength / 2.0 - self.gap) / 2.0,
loss / 2.0]),
(Path.CURVE4, [(trunklength / 2.0 - self.gap) / 8.0,
loss / 2.0]),
(Path.CURVE4, [(self.gap - trunklength / 2.0) / 8.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0) / 2.0,
-gain / 2.0]),
(Path.LINETO, [(self.gap - trunklength / 2.0),
-gain / 2.0])]
lrpath = [(Path.LINETO, [(trunklength / 2.0 - self.gap), # Lower right
loss / 2.0])]
ulpath = [(Path.LINETO, [self.gap - trunklength / 2.0, # Upper left
gain / 2.0])]
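# The four point lists above trace the trunk outline; their CURVE4
# segments let the top and bottom edges bow to absorb any difference
# between the total inputs (gain) and outputs (loss), as described in
# the docstring.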
# Add the subpaths and assign the locations of the tips and labels.
tips = np.zeros((n, 2))
label_locations = np.zeros((n, 2))
# Add the top-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == DOWN and is_input:
tips[i, :], label_locations[i, :] = self._add_input(
ulpath, angle, *spec)
elif angle == UP and not is_input:
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Add the bottom-side inputs and outputs from the middle outwards.
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == UP and is_input:
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
elif angle == DOWN and not is_input:
tip, label_location = self._add_output(lrpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the left-side inputs from the bottom upwards.
has_left_input = False
for i, (angle, is_input, spec) in enumerate(reversed(list(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))))):
if angle == RIGHT and is_input:
if not has_left_input:
# Make sure the lower path extends
# at least as far as the upper one.
if llpath[-1][1][0] > ulpath[-1][1][0]:
llpath.append((Path.LINETO, [ulpath[-1][1][0],
llpath[-1][1][1]]))
has_left_input = True
tip, label_location = self._add_input(llpath, angle, *spec)
tips[n - i - 1, :] = tip
label_locations[n - i - 1, :] = label_location
# Add the right-side outputs from the top downwards.
has_right_output = False
for i, (angle, is_input, spec) in enumerate(zip(
angles, are_inputs, list(zip(scaled_flows, pathlengths)))):
if angle == RIGHT and not is_input:
if not has_right_output:
# Make sure the upper path extends
# at least as far as the lower one.
if urpath[-1][1][0] < lrpath[-1][1][0]:
urpath.append((Path.LINETO, [lrpath[-1][1][0],
urpath[-1][1][1]]))
has_right_output = True
tips[i, :], label_locations[i, :] = self._add_output(
urpath, angle, *spec)
# Trim any hanging vertices.
if not has_left_input:
ulpath.pop()
llpath.pop()
if not has_right_output:
lrpath.pop()
urpath.pop()
# Concatenate the subpaths in the correct order (clockwise from top).
path = (urpath + self._revert(lrpath) + llpath + self._revert(ulpath) +
[(Path.CLOSEPOLY, urpath[0][1])])
# Create a patch with the Sankey outline.
codes, vertices = list(zip(*path))
vertices = np.array(vertices)
def _get_angle(a, r):
if a is None:
return None
else:
return a + r
if prior is None:
if rotation != 0: # By default, none of this is needed.
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
label_locations = rotate(label_locations)
vertices = rotate(vertices)
text = self.ax.text(0, 0, s=patchlabel, ha='center', va='center')
else:
rotation = (self.diagrams[prior].angles[connect[0]] -
angles[connect[1]])
angles = [_get_angle(angle, rotation) for angle in angles]
rotate = Affine2D().rotate_deg(rotation * 90).transform_affine
tips = rotate(tips)
offset = self.diagrams[prior].tips[connect[0]] - tips[connect[1]]
translate = Affine2D().translate(*offset).transform_affine
tips = translate(tips)
label_locations = translate(rotate(label_locations))
vertices = translate(rotate(vertices))
kwds = dict(s=patchlabel, ha='center', va='center')
text = self.ax.text(*offset, **kwds)
if False: # Debug
print("llpath\n", llpath)
print("ulpath\n", self._revert(ulpath))
print("urpath\n", urpath)
print("lrpath\n", self._revert(lrpath))
xs, ys = list(zip(*vertices))
self.ax.plot(xs, ys, 'go-')
patch = PathPatch(Path(vertices, codes),
fc=kwargs.pop('fc', kwargs.pop('facecolor',
'#bfd1d4')), # Custom defaults
lw=kwargs.pop('lw', kwargs.pop('linewidth', 0.5)),
**kwargs)
self.ax.add_patch(patch)
# Add the path labels.
texts = []
for number, angle, label, location in zip(flows, angles, labels,
label_locations):
if label is None or angle is None:
label = ''
elif self.unit is not None:
quantity = self.format % abs(number) + self.unit
if label != '':
label += "\n"
label += quantity
texts.append(self.ax.text(x=location[0], y=location[1],
s=label,
ha='center', va='center'))
# Text objects are placed even if they are empty (as long as the
# magnitude of the corresponding flow is larger than the tolerance), in
# case the user wants to provide labels later.
# Expand the size of the diagram if necessary.
self.extent = (min(np.min(vertices[:, 0]),
np.min(label_locations[:, 0]),
self.extent[0]),
max(np.max(vertices[:, 0]),
np.max(label_locations[:, 0]),
self.extent[1]),
min(np.min(vertices[:, 1]),
np.min(label_locations[:, 1]),
self.extent[2]),
max(np.max(vertices[:, 1]),
np.max(label_locations[:, 1]),
self.extent[3]))
# Include both vertices _and_ label locations in the extents; there are
# cases where either could determine the margins (e.g., arrow shoulders).
# Add this diagram as a subdiagram.
self.diagrams.append(Bunch(patch=patch, flows=flows, angles=angles,
tips=tips, text=text, texts=texts))
# Allow a daisy-chained call structure (see docstring for the class).
return self
def finish(self):
"""
Adjust the axes and return a list of information about the Sankey
subdiagram(s).
Return value is a list of subdiagrams represented with the following
fields:
=============== ===================================================
Field Description
=============== ===================================================
*patch* Sankey outline (an instance of
:class:`~matplotlib.patches.PathPatch`)
*flows* values of the flows (positive for input, negative
for output)
*angles* list of angles of the arrows [deg/90]
For example, if the diagram has not been rotated,
an input to the top side will have an angle of 3
(DOWN), and an output from the top side will have
an angle of 1 (UP). If a flow has been skipped
(because its magnitude is less than *tolerance*),
then its angle will be *None*.
*tips* array in which each row is an [x, y] pair
indicating the positions of the tips (or "dips") of
the flow paths
If the magnitude of a flow is less than the *tolerance*
for the instance of :class:`Sankey`, the flow is
skipped and its tip will be at the center of the
diagram.
*text* :class:`~matplotlib.text.Text` instance for the
label of the diagram
*texts* list of :class:`~matplotlib.text.Text` instances
for the labels of flows
=============== ===================================================
.. seealso::
:meth:`add`
"""
self.ax.axis([self.extent[0] - self.margin,
self.extent[1] + self.margin,
self.extent[2] - self.margin,
self.extent[3] + self.margin])
self.ax.set_aspect('equal', adjustable='datalim')
return self.diagrams
| apache-2.0 |
icexelloss/arrow | python/pyarrow/tests/test_ipc.py | 1 | 18676 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import pytest
import socket
import sys
import threading
import numpy as np
from pandas.util.testing import (assert_frame_equal,
assert_series_equal)
import pandas as pd
import pyarrow as pa
class IpcFixture(object):
def __init__(self, sink_factory=lambda: io.BytesIO()):
self._sink_factory = sink_factory
self.sink = self.get_sink()
def get_sink(self):
return self._sink_factory()
def get_source(self):
return self.sink.getvalue()
def write_batches(self, num_batches=5, as_table=False):
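# Build `num_batches` record batches that share one schema but carry
# fresh random data, write them through the concrete writer (file or
# stream format, depending on the fixture), and return both the pandas
# frames and the Arrow batches.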
nrows = 5
df = pd.DataFrame({
'one': np.random.randn(nrows),
'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
batch = pa.RecordBatch.from_pandas(df)
writer = self._get_writer(self.sink, batch.schema)
frames = []
batches = []
for i in range(num_batches):
unique_df = df.copy()
unique_df['one'] = np.random.randn(len(df))
batch = pa.RecordBatch.from_pandas(unique_df)
frames.append(unique_df)
batches.append(batch)
if as_table:
table = pa.Table.from_batches(batches)
writer.write_table(table)
else:
for batch in batches:
writer.write_batch(batch)
writer.close()
return frames, batches
class FileFormatFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchFileWriter(sink, schema)
def _check_roundtrip(self, as_table=False):
_, batches = self.write_batches(as_table=as_table)
file_contents = pa.BufferReader(self.get_source())
reader = pa.open_file(file_contents)
assert reader.num_record_batches == len(batches)
for i, batch in enumerate(batches):
# it works. Must convert back to DataFrame
batch = reader.get_batch(i)
assert batches[i].equals(batch)
assert reader.schema.equals(batches[0].schema)
class StreamFormatFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
class MessageFixture(IpcFixture):
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
return IpcFixture()
@pytest.fixture
def file_fixture():
return FileFormatFixture()
@pytest.fixture
def stream_fixture():
return StreamFormatFixture()
def test_empty_file():
buf = b''
with pytest.raises(pa.ArrowInvalid):
pa.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
lambda: io.BytesIO(),
lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
fixture = FileFormatFixture(sink_factory)
_, batches = fixture.write_batches()
file_contents = pa.BufferReader(fixture.get_source())
reader = pa.open_file(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
# ARROW-2859; APIs accept the buffer protocol
_, batches = file_fixture.write_batches()
source = file_fixture.get_source()
reader1 = pa.open_file(source)
reader2 = pa.open_file(pa.BufferReader(source))
reader3 = pa.RecordBatchFileReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
def test_file_read_pandas(file_fixture):
frames, _ = file_fixture.write_batches()
file_contents = pa.BufferReader(file_fixture.get_source())
reader = pa.open_file(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames)
assert_frame_equal(result, expected)
@pytest.mark.skipif(sys.version_info < (3, 6),
reason="need Python 3.6")
def test_file_pathlib(file_fixture, tmpdir):
import pathlib
_, batches = file_fixture.write_batches()
source = file_fixture.get_source()
path = tmpdir.join('file.arrow').strpath
with open(path, 'wb') as f:
f.write(source)
t1 = pa.open_file(pathlib.Path(path)).read_all()
t2 = pa.open_file(pa.OSFile(path)).read_all()
assert t1.equals(t2)
def test_empty_stream():
buf = io.BytesIO(b'')
with pytest.raises(pa.ArrowInvalid):
pa.open_stream(buf)
def test_stream_categorical_roundtrip(stream_fixture):
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
batch = pa.RecordBatch.from_pandas(df)
writer = stream_fixture._get_writer(stream_fixture.sink, batch.schema)
writer.write_batch(pa.RecordBatch.from_pandas(df))
writer.close()
table = (pa.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(), df)
def test_open_stream_from_buffer(stream_fixture):
# ARROW-2859
_, batches = stream_fixture.write_batches()
source = stream_fixture.get_source()
reader1 = pa.open_stream(source)
reader2 = pa.open_stream(pa.BufferReader(source))
reader3 = pa.RecordBatchStreamReader(source)
result1 = reader1.read_all()
result2 = reader2.read_all()
result3 = reader3.read_all()
assert result1.equals(result2)
assert result1.equals(result3)
def test_stream_write_dispatch(stream_fixture):
# ARROW-1616
df = pd.DataFrame({
'one': np.random.randn(5),
'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
categories=['foo', 'bar'],
ordered=True)
})
table = pa.Table.from_pandas(df, preserve_index=False)
batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
writer = stream_fixture._get_writer(stream_fixture.sink, table.schema)
writer.write(table)
writer.write(batch)
writer.close()
table = (pa.open_stream(pa.BufferReader(stream_fixture.get_source()))
.read_all())
assert_frame_equal(table.to_pandas(),
pd.concat([df, df], ignore_index=True))
def test_stream_write_table_batches(stream_fixture):
# ARROW-504
df = pd.DataFrame({
'one': np.random.randn(20),
})
b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False)
b2 = pa.RecordBatch.from_pandas(df, preserve_index=False)
table = pa.Table.from_batches([b1, b2, b1])
writer = stream_fixture._get_writer(stream_fixture.sink, table.schema)
writer.write_table(table, chunksize=15)
writer.close()
batches = list(pa.open_stream(stream_fixture.get_source()))
assert list(map(len, batches)) == [10, 15, 5, 10]
result_table = pa.Table.from_batches(batches)
assert_frame_equal(result_table.to_pandas(),
pd.concat([df[:10], df, df[:10]],
ignore_index=True))
def test_stream_simple_roundtrip(stream_fixture):
_, batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.open_stream(file_contents)
assert reader.schema.equals(batches[0].schema)
total = 0
for i, next_batch in enumerate(reader):
assert next_batch.equals(batches[i])
total += 1
assert total == len(batches)
with pytest.raises(StopIteration):
reader.read_next_batch()
def test_stream_read_all(stream_fixture):
_, batches = stream_fixture.write_batches()
file_contents = pa.BufferReader(stream_fixture.get_source())
reader = pa.open_stream(file_contents)
result = reader.read_all()
expected = pa.Table.from_batches(batches)
assert result.equals(expected)
def test_stream_read_pandas(stream_fixture):
frames, _ = stream_fixture.write_batches()
file_contents = stream_fixture.get_source()
reader = pa.open_stream(file_contents)
result = reader.read_pandas()
expected = pd.concat(frames)
assert_frame_equal(result, expected)
@pytest.fixture
def example_messages(stream_fixture):
_, batches = stream_fixture.write_batches()
file_contents = stream_fixture.get_source()
buf_reader = pa.BufferReader(file_contents)
reader = pa.MessageReader.open_stream(buf_reader)
return batches, list(reader)
def test_message_ctors_no_segfault():
with pytest.raises(TypeError):
repr(pa.Message())
with pytest.raises(TypeError):
repr(pa.MessageReader())
def test_message_reader(example_messages):
_, messages = example_messages
assert len(messages) == 6
assert messages[0].type == 'schema'
assert isinstance(messages[0].metadata, pa.Buffer)
assert isinstance(messages[0].body, pa.Buffer)
for msg in messages[1:]:
assert msg.type == 'record batch'
assert isinstance(msg.metadata, pa.Buffer)
assert isinstance(msg.body, pa.Buffer)
def test_message_serialize_read_message(example_messages):
_, messages = example_messages
msg = messages[0]
buf = msg.serialize()
restored = pa.read_message(buf)
restored2 = pa.read_message(pa.BufferReader(buf))
restored3 = pa.read_message(buf.to_pybytes())
assert msg.equals(restored)
assert msg.equals(restored2)
assert msg.equals(restored3)
def test_message_read_record_batch(example_messages):
batches, messages = example_messages
for batch, message in zip(batches, messages[1:]):
read_batch = pa.read_record_batch(message, batch.schema)
assert read_batch.equals(batch)
# ----------------------------------------------------------------------
# Socket streaming tests
class StreamReaderServer(threading.Thread):
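# A small TCP server that accepts a single connection, reads an Arrow
# stream from it, and stores either the whole table or the individual
# batches for later comparison.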
def init(self, do_read_all):
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.bind(('127.0.0.1', 0))
self._sock.listen(1)
host, port = self._sock.getsockname()
self._do_read_all = do_read_all
self._schema = None
self._batches = []
self._table = None
return port
def run(self):
connection, client_address = self._sock.accept()
try:
source = connection.makefile(mode='rb')
reader = pa.open_stream(source)
self._schema = reader.schema
if self._do_read_all:
self._table = reader.read_all()
else:
for i, batch in enumerate(reader):
self._batches.append(batch)
finally:
connection.close()
def get_result(self):
return (self._schema, self._table if self._do_read_all
else self._batches)
class SocketStreamFixture(IpcFixture):
def __init__(self):
# XXX(wesm): test will decide when to start socket server. This should
# probably be refactored
pass
def start_server(self, do_read_all):
self._server = StreamReaderServer()
port = self._server.init(do_read_all)
self._server.start()
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.connect(('127.0.0.1', port))
self.sink = self.get_sink()
def stop_and_get_result(self):
import struct
self.sink.write(struct.pack('i', 0))
self.sink.flush()
self._sock.close()
self._server.join()
return self._server.get_result()
def get_sink(self):
return self._sock.makefile(mode='wb')
def _get_writer(self, sink, schema):
return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def socket_fixture():
return SocketStreamFixture()
def test_socket_simple_roundtrip(socket_fixture):
socket_fixture.start_server(do_read_all=False)
_, writer_batches = socket_fixture.write_batches()
reader_schema, reader_batches = socket_fixture.stop_and_get_result()
assert reader_schema.equals(writer_batches[0].schema)
assert len(reader_batches) == len(writer_batches)
for i, batch in enumerate(writer_batches):
assert reader_batches[i].equals(batch)
def test_socket_read_all(socket_fixture):
socket_fixture.start_server(do_read_all=True)
_, writer_batches = socket_fixture.write_batches()
_, result = socket_fixture.stop_and_get_result()
expected = pa.Table.from_batches(writer_batches)
assert result.equals(expected)
# ----------------------------------------------------------------------
# Miscellaneous IPC tests
def test_ipc_zero_copy_numpy():
df = pd.DataFrame({'foo': [1.5]})
batch = pa.RecordBatch.from_pandas(df)
sink = pa.BufferOutputStream()
write_file(batch, sink)
buffer = sink.getvalue()
reader = pa.BufferReader(buffer)
batches = read_file(reader)
data = batches[0].to_pandas()
rdf = pd.DataFrame(data)
assert_frame_equal(df, rdf)
def test_ipc_stream_no_batches():
# ARROW-2307
table = pa.Table.from_arrays([pa.array([1, 2, 3, 4]),
pa.array(['foo', 'bar', 'baz', 'qux'])],
names=['a', 'b'])
sink = pa.BufferOutputStream()
writer = pa.RecordBatchStreamWriter(sink, table.schema)
writer.close()
source = sink.getvalue()
reader = pa.open_stream(source)
result = reader.read_all()
assert result.schema.equals(table.schema)
assert len(result) == 0
def test_get_record_batch_size():
N = 10
itemsize = 8
df = pd.DataFrame({'foo': np.random.randn(N)})
batch = pa.RecordBatch.from_pandas(df)
assert pa.get_record_batch_size(batch) > (N * itemsize)
def _check_serialize_pandas_round_trip(df, use_threads=False):
buf = pa.serialize_pandas(df, nthreads=2 if use_threads else 1)
result = pa.deserialize_pandas(buf, use_threads=use_threads)
assert_frame_equal(result, df)
def test_pandas_serialize_round_trip():
index = pd.Index([1, 2, 3], name='my_index')
columns = ['foo', 'bar']
df = pd.DataFrame(
{'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
index=index, columns=columns
)
_check_serialize_pandas_round_trip(df)
def test_pandas_serialize_round_trip_nthreads():
index = pd.Index([1, 2, 3], name='my_index')
columns = ['foo', 'bar']
df = pd.DataFrame(
{'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
index=index, columns=columns
)
_check_serialize_pandas_round_trip(df, use_threads=True)
def test_pandas_serialize_round_trip_multi_index():
index1 = pd.Index([1, 2, 3], name='level_1')
index2 = pd.Index(list('def'), name=None)
index = pd.MultiIndex.from_arrays([index1, index2])
columns = ['foo', 'bar']
df = pd.DataFrame(
{'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
index=index,
columns=columns,
)
_check_serialize_pandas_round_trip(df)
def test_serialize_pandas_empty_dataframe():
df = pd.DataFrame()
_check_serialize_pandas_round_trip(df)
def test_pandas_serialize_round_trip_not_string_columns():
df = pd.DataFrame(list(zip([1.5, 1.6, 1.7], 'abc')))
buf = pa.serialize_pandas(df)
result = pa.deserialize_pandas(buf)
assert_frame_equal(result, df)
def test_serialize_pandas_no_preserve_index():
df = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3])
expected = pd.DataFrame({'a': [1, 2, 3]})
buf = pa.serialize_pandas(df, preserve_index=False)
result = pa.deserialize_pandas(buf)
assert_frame_equal(result, expected)
buf = pa.serialize_pandas(df, preserve_index=True)
result = pa.deserialize_pandas(buf)
assert_frame_equal(result, df)
def test_serialize_with_pandas_objects():
df = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3])
s = pd.Series([1, 2, 3, 4])
data = {
'a_series': df['a'],
'a_frame': df,
's_series': s
}
serialized = pa.serialize(data).to_buffer()
deserialized = pa.deserialize(serialized)
assert_frame_equal(deserialized['a_frame'], df)
assert_series_equal(deserialized['a_series'], df['a'])
assert deserialized['a_series'].name == 'a'
assert_series_equal(deserialized['s_series'], s)
assert deserialized['s_series'].name is None
def test_schema_batch_serialize_methods():
nrows = 5
df = pd.DataFrame({
'one': np.random.randn(nrows),
'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
batch = pa.RecordBatch.from_pandas(df)
s_schema = batch.schema.serialize()
s_batch = batch.serialize()
recons_schema = pa.read_schema(s_schema)
recons_batch = pa.read_record_batch(s_batch, recons_schema)
assert recons_batch.equals(batch)
def test_schema_serialization_with_metadata():
field_metadata = {b'foo': b'bar', b'kind': b'field'}
schema_metadata = {b'foo': b'bar', b'kind': b'schema'}
f0 = pa.field('a', pa.int8())
f1 = pa.field('b', pa.string(), metadata=field_metadata)
schema = pa.schema([f0, f1], metadata=schema_metadata)
s_schema = schema.serialize()
recons_schema = pa.read_schema(s_schema)
assert recons_schema.equals(schema)
assert recons_schema.metadata == schema_metadata
assert recons_schema[0].metadata is None
assert recons_schema[1].metadata == field_metadata
def write_file(batch, sink):
writer = pa.RecordBatchFileWriter(sink, batch.schema)
writer.write_batch(batch)
writer.close()
def read_file(source):
reader = pa.open_file(source)
return [reader.get_batch(i)
for i in range(reader.num_record_batches)]
| apache-2.0 |
iABC2XYZ/abc | Epics/DataAna8.2.py | 1 | 4977 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 15:44:34 2017
@author: p
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.close('all')
def GenWeight(shape):
initial = tf.truncated_normal(shape, stddev=1.)
return tf.Variable(initial)
def GenBias(shape):
initial = tf.constant(1., shape=shape)
return tf.Variable(initial)
def getDataRow(exData,sizeRow):
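# Randomly sample `sizeRow` rows from the recorded data: columns 0-13 hold
# the 7 paired values returned as yCHV (presumably corrector H/V settings)
# and columns 14-23 the 5 paired BPM (x, y) readings.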
numEx=np.shape(exData)[0]
idChoose=np.random.randint(0,high=numEx,size=(sizeRow))
yCHV=np.reshape(exData[idChoose,0:14],(sizeRow,7,2))
xBPM=np.reshape(exData[idChoose,14:24],(sizeRow,5,2))
return xBPM,yCHV
def conv1d(x, W):
return tf.nn.conv1d(x, W, stride=1, padding="SAME")
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding="SAME")
exData=np.loadtxt('/home/e/ABC/abc/Epics/Rec.dat')
bpm=tf.placeholder(tf.float32,shape=(None,5,2))
cHV=tf.placeholder(tf.float32,shape=(None,7,2))
xInput=bpm
yInput=cHV
#
nChan1=200
w1= GenWeight([1,2,nChan1])
b1=GenBias([nChan1])
x1=tf.nn.relu(conv1d(xInput, w1)+b1)
#
nChan2=1
n2=nChan1/nChan2
x2=tf.reshape(x1,(-1,5,n2,nChan2))
#
nChan3=13
w3= GenWeight([1,1,nChan2,nChan3])
b3=GenBias([nChan3])
x3=tf.nn.relu(conv2d(x2, w3)+b3)
#
nChan4=13
w4= GenWeight([2,2,nChan2,nChan4])
b4=GenBias([nChan4])
x4=tf.nn.relu(conv2d(x2, w4)+b4)
#
nChan5=13
w5= GenWeight([3,3,nChan2,nChan5])
b5=GenBias([nChan5])
x5=tf.nn.relu(conv2d(x2, w5)+b5)
#
x6=tf.concat((tf.concat((x3,x4),axis=3),x5),axis=3)
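# x6 concatenates the 1x1, 2x2 and 3x3 convolution branches along the
# channel axis (an inception-style multi-scale block).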
#
nChan7=5
w7= GenWeight([3,3,nChan3+nChan4+nChan5,nChan7])
b7=GenBias([nChan7])
x7=tf.nn.relu(conv2d(x6, w7)+b7)
#
x8=tf.reshape(x7,(-1,5*n2*nChan7))
#
w9=GenWeight([5*n2*nChan7,14])
b9=GenBias([14])
x9=tf.matmul(x8,w9)+b9
#
n9_2=250
w9_2=GenWeight([5*n2*nChan7,n9_2])
b9_2=GenBias([n9_2])
x9_2=tf.nn.relu(tf.matmul(x8,w9_2)+b9_2)
#
w10_2=GenWeight([n9_2,14])
b10_2=GenBias([14])
x10_2=tf.matmul(x9_2,w10_2)+b10_2
##
xFinal=x10_2
xOutput=tf.reshape(xFinal,(-1,14))
yOutput=tf.reshape(yInput,(-1,14))
lossFn=tf.reduce_mean(tf.square(xOutput-yOutput))
trainBPM_1=tf.train.AdamOptimizer(0.05)
optBPM_1=trainBPM_1.minimize(lossFn)
trainBPM_2=tf.train.AdamOptimizer(0.01)
optBPM_2=trainBPM_2.minimize(lossFn)
trainBPM_3=tf.train.AdamOptimizer(0.005)
optBPM_3=trainBPM_3.minimize(lossFn)
trainBPM_4=tf.train.AdamOptimizer(0.001)
optBPM_4=trainBPM_4.minimize(lossFn)
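# Four Adam optimizers with decreasing learning rates are defined, but only
# optBPM_4 (learning rate 0.001) is run in the training loop below.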
iniBPM=tf.global_variables_initializer()
try:
if vars().has_key('se'):
se.close()
except:
pass
se= tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se.run(iniBPM)
nIt=2e7
sizeRow=100
stepLossRec=50
nLossRec=np.int32(nIt/stepLossRec+1)
lossRec=np.zeros((nLossRec))
iRec=0
for i in range(np.int32(nIt)):
xBPM,yCHV=getDataRow(exData,sizeRow)
se.run(optBPM_4,feed_dict={bpm:xBPM,cHV:yCHV})
if i % stepLossRec==0:
lossRecTmp=se.run(lossFn,feed_dict={bpm:xBPM,cHV:yCHV})
lossRec[iRec]=lossRecTmp
iRec+=1
print lossRecTmp
plt.figure('lossRec')
numPlot=30
plt.clf()
if iRec<=numPlot:
xPlot=np.linspace(0,iRec-1,iRec)
yPlot=lossRec[0:iRec:]
yPlotMean=np.cumsum(yPlot)/(xPlot+1)
else:
xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlot=lossRec[iRec-numPlot:iRec:]
yPlotMean[0:-1:]=yPlotMean[1::]
yPlotMean[-1]=np.mean(yPlot)
plt.hold
plt.plot(xPlot,yPlot,'*b')
plt.plot(xPlot,yPlotMean,'go')
plt.grid('on')
plt.title(i)
plt.pause(0.05)
xBPM,yCHV=getDataRow(exData,1)
yCHV_Cal=se.run(xFinal,feed_dict={bpm:xBPM})
plt.figure(2)
plt.clf()
plt.hold
plt.plot(np.reshape(yCHV[0,:],(14)),'bd')
plt.plot(yCHV_Cal[0,:],'rd')
plt.title(i)
plt.pause(0.05)
#se.close()
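# Evaluate the trained network on four hand-crafted BPM reading patterns to
# inspect the corrector settings it predicts for them.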
xBPMReal_1=np.ones((5,2))*0.
xBPMReal_1[0,:]=[2.2,-1.6]
xBPMReal_2=np.ones((5,2))*3.
xBPMReal_3=np.ones((5,2))*(-3.)
xBPMReal_4=np.ones((5,2))
xBPMReal_4[:,0]=xBPMReal_4[:,0]*3.
xBPMReal_4[:,1]=xBPMReal_4[:,1]*(-3.)
xBPMReal=np.zeros((4,5,2))
xBPMReal[0,:,:]=xBPMReal_1
xBPMReal[1,:,:]=xBPMReal_2
xBPMReal[2,:,:]=xBPMReal_3
xBPMReal[3,:,:]=xBPMReal_4
yCHV_Cal4Real=se.run(xFinal,feed_dict={bpm:xBPMReal})
yCHV_Cal4Real_1=np.reshape(yCHV_Cal4Real[0,::],(7,2))
yCHV_Cal4Real_2=np.reshape(yCHV_Cal4Real[1,::],(7,2))
yCHV_Cal4Real_3=np.reshape(yCHV_Cal4Real[2,::],(7,2))
yCHV_Cal4Real_4=np.reshape(yCHV_Cal4Real[3,::],(7,2))
print '----------------- yCHV_Cal4Real_1 --------------------------'
print yCHV_Cal4Real_1
print '----------------- yCHV_Cal4Real_2 --------------------------'
print yCHV_Cal4Real_2
print '----------------- yCHV_Cal4Real_3 --------------------------'
print yCHV_Cal4Real_3
print '----------------- yCHV_Cal4Real_4 --------------------------'
print yCHV_Cal4Real_4
| gpl-3.0 |
raghakot/deep-learning-experiments | exp2/explore.py | 1 | 2858 | import numpy as np
import seaborn as sb
import pandas as pd
import matplotlib.pyplot as plt
from scipy import ndimage
from train import get_model
from cifar10 import X_test, y_test
from keras.layers import Convolution2D as conv_old
from layers import Convolution2D_4 as conv_4
from layers import Convolution2D_8 as conv_8
def get_probs_matched(imgs, model, idx):
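# For each rotated image, return the probability the model assigns to the
# true class, plus the percentage of rotations whose top prediction is
# still the true class.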
probs = model.predict_proba(imgs, verbose=0)
preds = probs.argmax(axis=-1)
probs = probs[:, y_test[idx][0]]
matched = (len(np.where(preds == y_test[idx][0])[0]) * 100.0) / len(imgs)
return probs, matched
def compare(test_id, angles, models):
img = X_test[test_id]
imgs = np.array([ndimage.rotate(img, rot, reshape=False) for rot in angles])
all_probs = []
all_matched = []
for model in models:
probs, matched = get_probs_matched(imgs, model, test_id)
all_probs.append(probs)
all_matched.append(matched)
return all_probs, all_matched
def plot_multi(names, models, angles, runs=1000):
indices = np.random.permutation(len(X_test))[:runs]
matched_all = []
for i, idx in enumerate(indices):
print("Processing {}/{}".format(i, len(indices)))
probs, matched = compare(idx, angles, models)
matched_all.append(matched)
matched_all = np.array(matched_all)
order = np.argsort(np.mean(matched_all, axis=0))
df = pd.DataFrame.from_items([(names[i], matched_all[:, i]) for i in order])
sb.boxplot(data=df)
plt.show()
def plot_single(names, models, angles, test_id):
all_probs, all_matched = compare(test_id, angles, models)
legends = []
order = np.argsort(all_matched)
for i in order:
plt.plot(angles, all_probs[i])
legends.append('{} {:.2f}%'.format(names[i], all_matched[i]))
plt.ylabel('Prediction probability of correct class')
plt.legend(legends, loc=9, bbox_to_anchor=(0.5, -0.05), ncol=len(names))
plt.show()
if __name__ == '__main__':
names = ['baseline', '8_rot_4', '8_rot_3', '8_rot_2', '8_rot_1', '4_rot_4', '4_rot_3', '4_rot_2', '4_rot_1']
convs = [
None,
[conv_8, conv_8, conv_8, conv_8],
[conv_old, conv_8, conv_8, conv_8],
[conv_old, conv_old, conv_8, conv_8],
[conv_old, conv_old, conv_old, conv_8],
[conv_4, conv_4, conv_4, conv_4],
[conv_old, conv_4, conv_4, conv_4],
[conv_old, conv_old, conv_4, conv_4],
[conv_old, conv_old, conv_old, conv_4],
]
models = []
indices = range(len(names))
for i in indices:
model = get_model(convs[i])
model.load_weights('./weights/{}.hdf5'.format(names[i]))
models.append(model)
angles = np.arange(0, 360, 1)
# plot_single([names[i] for i in indices], models, angles=angles, test_id=5)
plot_multi(names, models, angles=angles, runs=1000)
| mit |
rahlk/RAAT | src/Planners/XTREE/Prediction.py | 1 | 8992 | from __future__ import division
from pdb import set_trace
from os import environ, getcwd
from os import walk
from os.path import expanduser
from pdb import set_trace
import sys
# Update PYTHONPATH
HOME = expanduser('~')
axe = HOME + '/git/axe/axe/' # AXE
pystat = HOME + '/git/pystats/' # PySTAT
cwd = getcwd() # Current Directory
sys.path.extend([axe, pystat, cwd])
from scipy.stats.mstats import mode
from scipy.spatial.distance import euclidean
from numpy import mean
from random import choice, uniform as rand
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from smote import *
import pandas as pd
from tools.axe.abcd import _Abcd
from methods1 import *
from tools.sk import rdivDemo
def formatData(tbl):
Rows = [i.cells for i in tbl._rows]
headers = [i.name for i in tbl.headers]
return pd.DataFrame(Rows, columns=headers)
def Bugs(tbl):
cells = [i.cells[-2] for i in tbl._rows]
return cells
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PREDICTION SYSTEMS:
# ```````````````````
# 1. WHERE2 2. RANDOM FORESTS, 3. DECISION TREES, 4. ADABOOST,
# 5. LOGISTIC REGRESSION
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def where2prd(train, test, tunings=[None, None], smoteit=False, thresh=1):
"WHERE2"
def flatten(x):
"""
Takes an N times nested list of list like [[a,b],[c, [d, e]],[f]]
and returns a single list [a,b,c,d,e,f]
"""
result = []
for el in x:
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def leaves(node):
"""
Returns all terminal nodes.
"""
L = []
if len(node.kids) > 1:
for l in node.kids:
L.extend(leaves(l))
return L
elif len(node.kids) == 1:
return [node.kids]
else:
return [node]
train_DF = createTbl(
train,
settings=tunings[0],
_smote=False,
isBin=True,
bugThres=2)
test_df = createTbl(test)
t = discreteNums(train_DF, map(lambda x: x.cells, train_DF._rows))
myTree = tdiv(t, opt=tunings[1])
testCase = test_df._rows
rows, preds = [], []
for tC in testCase:
newRow = tC
loc = drop(tC, myTree) # Drop a test case in the tree & see where it lands
leafNodes = flatten(leaves(loc))
# set_trace()
rows = [leaf.rows for leaf in leafNodes][0]
vals = [r.cells[-2] for r in rows]
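# Label the test case defective (1) when the mean bug count of the rows in
# the landing leaf reaches `thresh`, otherwise clean (0).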
preds.append(0 if mean([k for k in vals]).tolist() < thresh else 1)
# if median(vals) > 0 else preds.extend([0])
return preds
def _where2pred():
"Test where2"
dir = '../Data'
one, two = explore(dir)
# set_trace()
# Training data
train = one[0][:-1]
# Test data
test = [one[0][-1]]
actual = Bugs(createTbl(test, isBin=True))
preds = where2prd(train, test)
# for a, b in zip(actual, preds): print a, b
# set_trace()
return _Abcd(before=actual, after=preds, show=False)[-1]
def rforest(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
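# When supplied, `tunings` is [n_estimators, max_features (as a percent),
# min_samples_leaf, min_samples_split].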
if not tunings:
clf = RandomForestClassifier(n_estimators=100, random_state=1)
else:
clf = RandomForestClassifier(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def rforest2(train, test, tunings=None, smoteit=True, duplicate=True):
"RF "
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = RandomForestRegressor(n_estimators=100, random_state=1)
else:
clf = RandomForestRegressor(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3])
)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]])
return preds
def _RF():
"Test RF"
dir = '../Data'
one, two = explore(dir)
# Training data
train_DF = createTbl([one[0][0]])
# Test data
test_df = createTbl([one[0][1]])
actual = Bugs(test_df)
# rforest() takes its hyper-parameters through the `tunings` list; call it
# with the defaults here so the arguments match its signature.
preds = rforest(train_DF, test_df, smoteit=False)
print _Abcd(before=actual, after=preds, show=False)[-1]
def CART(train, test, tunings=None, smoteit=True, duplicate=True):
" CART"
# Apply random forest Classifier to predict the number of bugs.
if smoteit:
train = SMOTE(train, atleast=50, atmost=101, resample=duplicate)
if not tunings:
clf = DecisionTreeClassifier()
else:
clf = DecisionTreeClassifier(max_depth=int(tunings[0]),
min_samples_split=int(tunings[1]),
min_samples_leaf=int(tunings[2]),
max_features=float(tunings[3] / 100),
max_leaf_nodes=int(tunings[4]),
criterion='entropy')
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features].astype('float32'), klass.astype('float32'))
preds = clf.predict(test_DF[test_DF.columns[:-2]].astype('float32')).tolist()
return preds
def _CART():
"Test CART"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = CART(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def adaboost(train, test, smoteit=True):
"ADABOOST"
if smoteit:
train = SMOTE(train)
clf = AdaBoostClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _adaboost():
"Test AdaBoost"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = adaboost(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def logit(train, test, smoteit=True):
"Logistic Regression"
if smoteit:
train = SMOTE(train)
clf = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0,
fit_intercept=True, intercept_scaling=1,
class_weight=None, random_state=None)
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
clf.fit(train_DF[features], klass)
preds = clf.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
def _logit():
"Test LOGIT"
dir = './Data'
one, two = explore(dir)
# Training data
train_DF = createTbl(one[0])
# Test data
test_df = createTbl(two[0])
actual = Bugs(test_df)
preds = logit(train_DF, test_df)
set_trace()
_Abcd(before=actual, after=preds, show=True)
def knn(train, test, smoteit=True):
"kNN"
if smoteit:
train = SMOTE(train)
neigh = KNeighborsClassifier()
train_DF = formatData(train)
test_DF = formatData(test)
features = train_DF.columns[:-2]
klass = train_DF[train_DF.columns[-2]]
# set_trace()
neigh.fit(train_DF[features], klass)
preds = neigh.predict(test_DF[test_DF.columns[:-2]]).tolist()
return preds
if __name__ == '__main__':
random.seed(0)
Dat = []
for _ in xrange(10):
print(_where2pred())
# Dat.insert(0, 'Where2 untuned')
# rdivDemo([Dat])
| mit |
xuewei4d/scikit-learn | sklearn/datasets/_twenty_newsgroups.py | 7 | 18895 | """Caching loader for the 20 newsgroups text classification dataset.
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
from os.path import dirname, join
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
import joblib
from . import get_data_home
from . import load_files
from ._base import _convert_data_dataframe
from ._base import _pkl_filepath
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..feature_extraction.text import CountVectorizer
from .. import preprocessing
from ..utils import check_random_state, Bunch
from ..utils.validation import _deprecate_positional_args
logger = logging.getLogger(__name__)
# The original data can be found at:
# https://people.csail.mit.edu/jrennie/20Newsgroups/20news-bydate.tar.gz
ARCHIVE = RemoteFileMetadata(
filename='20news-bydate.tar.gz',
url='https://ndownloader.figshare.com/files/5975967',
checksum=('8f1b2514ca22a5ade8fbb9cfa5727df9'
'5fa587f4c87b786e15c759fa66d95610'))
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def _download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
logger.info("Downloading dataset from %s (14 MB)", ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=target_dir)
logger.debug("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
Parameters
----------
text : str
The text from which to remove the signature block.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
Parameters
----------
text : str
The text from which to remove the signature block.
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
Parameters
----------
text : str
The text from which to remove the signature block.
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
@_deprecate_positional_args
def fetch_20newsgroups(*, data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True, return_X_y=False):
"""Load the filenames and data from the 20 newsgroups dataset \
(classification).
Download it if necessary.
================= ==========
Classes 20
Samples total 18846
Dimensionality 1
Features text
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
data_home : str, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
categories : array-like, dtype=str or unicode, default=None
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle : bool, default=True
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns `(data.data, data.target)` instead of a Bunch
object.
.. versionadded:: 0.22
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : list of shape (n_samples,)
The data list to learn.
target: ndarray of shape (n_samples,)
The target labels.
filenames: list of shape (n_samples,)
The path to the location of the data.
DESCR: str
The full description of the dataset.
target_names: list of shape (n_classes,)
The names of target classes.
(data, target) : tuple if `return_X_y=True`
.. versionadded:: 0.22
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
logger.info("Downloading 20news dataset. "
"This may take a few minutes.")
cache = _download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'twenty_newsgroups.rst')) as rst_file:
fdescr = rst_file.read()
data.DESCR = fdescr
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
if return_X_y:
return data.data, data.target
return data
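# Typical usage (illustrative; any of the twenty newsgroup names work):
#   bunch = fetch_20newsgroups(subset='train',
#                              categories=['sci.space', 'rec.autos'],
#                              remove=('headers', 'footers', 'quotes'))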
@_deprecate_positional_args
def fetch_20newsgroups_vectorized(*, subset="train", remove=(), data_home=None,
download_if_missing=True, return_X_y=False,
normalize=True, as_frame=False):
"""Load and vectorize the 20 newsgroups dataset (classification).
Download it if necessary.
This is a convenience function; the transformation is done using the
default settings for
:class:`~sklearn.feature_extraction.text.CountVectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom
:class:`~sklearn.feature_extraction.text.CountVectorizer`,
:class:`~sklearn.feature_extraction.text.HashingVectorizer`,
:class:`~sklearn.feature_extraction.text.TfidfTransformer` or
:class:`~sklearn.feature_extraction.text.TfidfVectorizer`.
The resulting counts are normalized using
:func:`sklearn.preprocessing.normalize` unless normalize is set to False.
================= ==========
Classes 20
Samples total 18846
Dimensionality 130107
Features real
================= ==========
Read more in the :ref:`User Guide <20newsgroups_dataset>`.
Parameters
----------
subset : {'train', 'test', 'all'}, default='train'
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
remove : tuple, default=()
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
data_home : str, default=None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
normalize : bool, default=True
If True, normalizes each document's feature vector to unit norm using
:func:`sklearn.preprocessing.normalize`.
.. versionadded:: 0.22
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string, or categorical). The target is
a pandas DataFrame or Series depending on the number of
`target_columns`.
.. versionadded:: 0.24
Returns
-------
bunch : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data: {sparse matrix, dataframe} of shape (n_samples, n_features)
The input data matrix. If ``as_frame`` is `True`, ``data`` is
a pandas DataFrame with sparse columns.
target: {ndarray, series} of shape (n_samples,)
The target labels. If ``as_frame`` is `True`, ``target`` is a
pandas Series.
target_names: list of shape (n_classes,)
The names of target classes.
DESCR: str
The full description of the dataset.
frame: dataframe of shape (n_samples, n_features + 1)
Only present when `as_frame=True`. Pandas DataFrame with ``data``
and ``target``.
.. versionadded:: 0.24
(data, target) : tuple if ``return_X_y`` is True
`data` and `target` would be of the format defined in the `Bunch`
description above.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove,
download_if_missing=download_if_missing)
if os.path.exists(target_file):
try:
X_train, X_test, feature_names = joblib.load(target_file)
except ValueError as e:
raise ValueError(
f"The cached dataset located in {target_file} was fetched "
f"with an older scikit-learn version and it is not compatible "
f"with the scikit-learn version imported. You need to "
f"manually delete the file: {target_file}."
) from e
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
feature_names = vectorizer.get_feature_names()
joblib.dump((X_train, X_test, feature_names), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
if normalize:
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
preprocessing.normalize(X_train, copy=False)
preprocessing.normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'twenty_newsgroups.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
target_name = ['category_class']
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_20newsgroups_vectorized",
data,
target,
feature_names,
target_names=target_name,
sparse_data=True
)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr)
| bsd-3-clause |
sgenoud/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 3 | 2742 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print __doc__
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import pylab as pl
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import metrics
from sklearn.metrics.metrics import confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
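# Unlabeled points are marked with -1, the convention LabelSpreading uses to
# decide which labels must be inferred.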
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iters=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % \
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
print metrics.classification_report(true_labels, predicted_labels)
print "Confusion matrix"
print cm
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = pl.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=pl.cm.gray_r)
pl.xticks([])
pl.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
pl.show()
| bsd-3-clause |