repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/cluster/tests/test_hierarchy.py | 26 | 35159 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(y) where y is an empty distance matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrices of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with inconsistency matrices of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
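# Restore the previous palette (the default link colors) so other tests are
# not affected by the custom palette set above.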
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
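# Note on the two helpers above: both walk the linkage matrix bottom-up. For
# the i-th merge they take the maximum of the values already computed for any
# non-leaf children (cluster indices >= n) and the merge's own statistic
# (Z[i, 2] for distances, R[i, k] for inconsistencies).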
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
Titan-C/scikit-learn | examples/text/document_clustering.py | 1 | 8526 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
# #############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
# #############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
Viktor-Evst/luigi | examples/pyspark_wc.py | 56 | 3361 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
| apache-2.0 |
BubuLK/sfepy | probe.py | 5 | 9831 | #!/usr/bin/env python
# 12.01.2007, c
"""
Probe finite element solutions in points defined by various geometrical probes.
Generation mode
---------------
python probe.py [generation options] <input file> <results file>
Probe the data in the results file corresponding to the problem defined in the
input file. The input file options must contain 'gen_probes' and 'probe_hook'
keys, pointing to proper functions accessible from the input file scope.
For each probe returned by `gen_probes()` a data plot figure and a text
file with the data plotted are saved, see the options below.
Generation options
------------------
-o, --auto-dir, --same-dir, -f, --only-names, -s
Postprocessing mode
-------------------
python probe.py [postprocessing options] <probe file> <figure file>
Read previously probed data from the probe text file, re-plot them,
and integrate them along the probe.
Postprocessing options
----------------------
--postprocess, --radial, --only-names
Notes
-----
For extremely thin hexahedral elements, Newton's iteration for finding the
reference element coordinates might converge to a spurious solution outside
of the element. To obtain some values even in this case, try increasing the
--close-limit option value.
"""
from __future__ import absolute_import
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import numpy as nm
import sfepy
from sfepy.base.base import output, assert_
from sfepy.base.ioutils import edit_filename
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.discrete import Problem
from sfepy.discrete.fem import MeshIO
from sfepy.discrete.probes import write_results, read_results
import six
helps = {
'debug':
'automatically start debugger when an exception is raised',
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'output_format' :
'output figure file format (supported by the matplotlib backend used) '\
'[default: %(default)s]',
'auto_dir' :
'the directory of the results file is determined automatically using the '\
'"output_dir" option in input file options',
'same_dir' :
'store the probe figures/data in the directory of the results file',
'only_names' :
'probe only named data',
'step' :
'probe the given time step',
'close_limit' :
'maximum limit distance of a point from the closest element allowed'
' for extrapolation. [default: %(default)s]',
'postprocess' :
'postprocessing mode',
'radial' :
'assume radial integration',
}
def generate_probes(filename_input, filename_results, options,
conf=None, problem=None, probes=None, labels=None,
probe_hooks=None):
"""
Generate probe figures and data files.
"""
if conf is None:
required, other = get_standard_keywords()
conf = ProblemConf.from_file(filename_input, required, other)
opts = conf.options
if options.auto_dir:
output_dir = opts.get_('output_dir', '.')
filename_results = os.path.join(output_dir, filename_results)
output('results in: %s' % filename_results)
io = MeshIO.any_from_filename(filename_results)
step = options.step if options.step >= 0 else io.read_last_step()
all_data = io.read_data(step)
output('loaded:', list(all_data.keys()))
output('from step:', step)
if options.only_names is None:
data = all_data
else:
data = {}
for key, val in six.iteritems(all_data):
if key in options.only_names:
data[key] = val
if problem is None:
problem = Problem.from_conf(conf,
init_equations=False, init_solvers=False)
if probes is None:
gen_probes = conf.get_function(conf.options.gen_probes)
probes, labels = gen_probes(problem)
if probe_hooks is None:
probe_hooks = {None : conf.get_function(conf.options.probe_hook)}
if options.output_filename_trunk is None:
options.output_filename_trunk = problem.ofn_trunk
filename_template = options.output_filename_trunk \
+ ('_%%d.%s' % options.output_format)
if options.same_dir:
filename_template = os.path.join(os.path.dirname(filename_results),
filename_template)
output_dir = os.path.dirname(filename_results)
for ip, probe in enumerate(probes):
output(ip, probe.name)
probe.set_options(close_limit=options.close_limit)
for key, probe_hook in six.iteritems(probe_hooks):
out = probe_hook(data, probe, labels[ip], problem)
if out is None: continue
if isinstance(out, tuple):
fig, results = out
else:
fig = out
if key is not None:
filename = filename_template % (key, ip)
else:
filename = filename_template % ip
if fig is not None:
if isinstance(fig, dict):
for fig_name, fig_fig in six.iteritems(fig):
fig_filename = edit_filename(filename,
suffix='_' + fig_name)
fig_fig.savefig(fig_filename)
output('figure ->', os.path.normpath(fig_filename))
else:
fig.savefig(filename)
output('figure ->', os.path.normpath(filename))
if results is not None:
txt_filename = edit_filename(filename, new_ext='.txt')
write_results(txt_filename, probe, results)
output('data ->', os.path.normpath(txt_filename))
def integrate_along_line(x, y, is_radial=False):
"""
Integrate numerically (trapezoidal rule) a function :math:`y=y(x)`.
If is_radial is True, multiply each :math:`y` by :math:`4 \pi x^2`.
"""
dx = nm.diff(x)
ay = 0.5 * (y[:-1] + y[1:])
if is_radial:
ax = 0.5 * (x[:-1] + x[1:])
val = 4.0 * nm.pi * nm.sum(ay * dx * (ax**2))
else:
val = nm.sum(ay * dx)
return val
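# Quick sanity check (illustrative): the trapezoidal rule is exact for a linear
# integrand, so integrating y = x on [0, 1] should give 0.5:
#
#   x = nm.linspace(0.0, 1.0, 101)
#   integrate_along_line(x, x)  # -> 0.5 (up to floating point error)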
def postprocess(filename_input, filename_results, options):
"""
Postprocess probe data files - replot, integrate data.
"""
from matplotlib import pyplot as plt
header, results = read_results(filename_input,
only_names=options.only_names)
output(header)
fig = plt.figure()
for name, result in six.iteritems(results):
pars, vals = result[:, 0], result[:, 1]
ii = nm.where(nm.isfinite(vals))[0]
# Nans only at the edges.
assert_(nm.diff(ii).sum() == (len(ii)-1))
val = integrate_along_line(pars[ii], vals[ii], options.radial)
label = r'%s: $\int\ %s' % (name, name)
if options.radial:
label += ' (r)'
label += '$ = %.5e'% val
plt.plot(pars, vals, label=label, lw=0.2, marker='+', ms=1)
plt.ylabel('probed data')
plt.xlabel('probe coordinate')
output(label)
plt.legend()
fig.savefig(filename_results)
def main():
parser = ArgumentParser(description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version',
version='%(prog)s ' + sfepy.__version__)
parser.add_argument('--debug',
action='store_true', dest='debug',
default=False, help=helps['debug'])
parser.add_argument('-o', metavar='filename',
action='store', dest='output_filename_trunk',
default=None, help=helps['filename'])
parser.add_argument('--auto-dir',
action='store_true', dest='auto_dir',
default=False, help=helps['auto_dir'])
parser.add_argument('--same-dir',
action='store_true', dest='same_dir',
default=False, help=helps['same_dir'])
parser.add_argument('-f', '--format', metavar='format',
action='store', dest='output_format',
default='png', help=helps['output_format'])
parser.add_argument('--only-names', metavar='list of names',
action='store', dest='only_names',
default=None, help=helps['only_names'])
parser.add_argument('-s', '--step', type=int, metavar='step',
action='store', dest='step',
default=0, help=helps['step'])
parser.add_argument('-c', '--close-limit', type=float, metavar='distance',
action='store', dest='close_limit',
default=0.1, help=helps['close_limit'])
parser.add_argument('-p', '--postprocess',
action='store_true', dest='postprocess',
default=False, help=helps['postprocess'])
parser.add_argument('--radial',
action='store_true', dest='radial',
default=False, help=helps['radial'])
parser.add_argument('filename_in')
parser.add_argument('filename_out')
options = parser.parse_args()
if options.debug:
from sfepy.base.base import debug_on_error; debug_on_error()
filename_input = options.filename_in
filename_results = options.filename_out
if options.only_names is not None:
options.only_names = options.only_names.split(',')
output.prefix = 'probe:'
if options.postprocess:
postprocess(filename_input, filename_results, options)
else:
generate_probes(filename_input, filename_results, options)
if __name__ == '__main__':
main()
| bsd-3-clause |
DSLituiev/scikit-learn | benchmarks/bench_mnist.py | 44 | 6801 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes (digits from 0 to 9) from their raw images. In contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LinearRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
algorithm='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
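# Note: 'Nystroem-SVM' and 'SampledRBF-SVM' approximate an RBF-kernel SVM by
# mapping the data with a kernel approximation (Nystroem / RBFSampler) and then
# fitting a LinearSVC on the transformed features.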
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
xguse/bokeh | examples/plotting/server/elements.py | 42 | 1532 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import pandas as pd
from bokeh.plotting import figure, show, output_server
from bokeh.sampledata import periodic_table
elements = periodic_table.elements
elements = elements[elements["atomic number"] <= 82]
elements = elements[~pd.isnull(elements["melting point"])]
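# Atomic masses of unstable elements appear in brackets (e.g. "[209]"), so
# strip the brackets before converting to float.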
mass = [float(x.strip("[]")) for x in elements["atomic mass"]]
elements["atomic mass"] = mass
palette = list(reversed([
"#67001f","#b2182b","#d6604d","#f4a582","#fddbc7","#f7f7f7","#d1e5f0","#92c5de","#4393c3","#2166ac","#053061"
]))
melting_points = elements["melting point"]
low = min(melting_points)
high= max(melting_points)
melting_point_inds = [int(10*(x-low)/(high-low)) for x in melting_points]  # map each melting point to a palette index in 0-10
meltingpointcolors = [palette[i] for i in melting_point_inds]
output_server("elements")
TOOLS = "pan,wheel_zoom,box_zoom,reset,resize,save"
p = figure(tools=TOOLS, toolbar_location="left", plot_width=1200)
p.title = "Density vs Atomic Weight of Elements (colored by melting point)"
p.background_fill= "#cccccc"
p.circle(elements["atomic mass"], elements["density"], size=12,
color=meltingpointcolors, line_color="black", fill_alpha=0.8)
p.text(elements["atomic mass"], elements["density"]+0.3,
text=elements["symbol"], text_color="#333333",
text_align="center", text_font_size="10pt")
p.xaxis.axis_label="atomic weight (amu)"
p.yaxis.axis_label="density (g/cm^3)"
p.grid.grid_line_color="white"
show(p)
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/decomposition/tests/test_pca.py | 14 | 11028 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that the explained variances reported by PCA and RandomizedPCA agree
# and match the empirical variances of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
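# h is the expected log-likelihood per sample under the true isotropic Gaussian
# (sigma = 0.1) in p dimensions, so ll1 / h should be close to 1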
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/feature_extraction/dict_vectorizer.py | 234 | 12267 | # Authors: Lars Buitinck
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from array import array
from collections import Mapping
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..utils import check_array, tosequence
from ..utils.fixes import frombuffer_empty
def _tosequence(X):
"""Turn X into a sequence or ndarray, avoiding a copy if possible."""
if isinstance(X, Mapping): # single sample
return [X]
else:
return tosequence(X)
class DictVectorizer(BaseEstimator, TransformerMixin):
"""Transforms lists of feature-value mappings to vectors.
This transformer turns lists of mappings (dict-like objects) of feature
names to feature values into Numpy arrays or scipy.sparse matrices for use
with scikit-learn estimators.
When feature values are strings, this transformer will do a binary one-hot
(aka one-of-K) coding: one boolean-valued feature is constructed for each
of the possible string values that the feature can take on. For instance,
a feature "f" that can take on the values "ham" and "spam" will become two
features in the output, one signifying "f=ham", the other "f=spam".
Features that do not occur in a sample (mapping) will have a zero value
in the resulting array/matrix.
Read more in the :ref:`User Guide <dict_feature_extraction>`.
Parameters
----------
dtype : callable, optional
The type of feature values. Passed to Numpy array/scipy.sparse matrix
constructors as the dtype argument.
separator: string, optional
Separator string used when constructing new features for one-hot
coding.
sparse: boolean, optional.
Whether transform should produce scipy.sparse matrices.
True by default.
sort: boolean, optional.
Whether ``feature_names_`` and ``vocabulary_`` should be sorted when fitting.
True by default.
Attributes
----------
vocabulary_ : dict
A dictionary mapping feature names to feature indices.
feature_names_ : list
A list of length n_features containing the feature names (e.g., "f=ham"
and "f=spam").
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> v = DictVectorizer(sparse=False)
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> X
array([[ 2., 0., 1.],
[ 0., 1., 3.]])
>>> v.inverse_transform(X) == \
[{'bar': 2.0, 'foo': 1.0}, {'baz': 1.0, 'foo': 3.0}]
True
>>> v.transform({'foo': 4, 'unseen_feature': 3})
array([[ 0., 0., 4.]])
See also
--------
FeatureHasher : performs vectorization using only a hash function.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, dtype=np.float64, separator="=", sparse=True,
sort=True):
self.dtype = dtype
self.separator = separator
self.sparse = sparse
self.sort = sort
def fit(self, X, y=None):
"""Learn a list of feature name -> indices mappings.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
self
"""
feature_names = []
vocab = {}
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
if self.sort:
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def _transform(self, X, fitting):
# Sanity check: Python's array has no way of explicitly requesting the
# signed 32-bit integers that scipy.sparse needs, so we use the next
# best thing: typecode "i" (int). However, if that gives larger or
# smaller integers than 32-bit ones, np.frombuffer screws up.
assert array("i").itemsize == 4, (
"sizeof(int) != 4 on your platform; please report this at"
" https://github.com/scikit-learn/scikit-learn/issues and"
" include the output from platform.platform() in your bug report")
dtype = self.dtype
if fitting:
feature_names = []
vocab = {}
else:
feature_names = self.feature_names_
vocab = self.vocabulary_
# Process everything as sparse regardless of setting
X = [X] if isinstance(X, Mapping) else X
indices = array("i")
indptr = array("i", [0])
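# CSR bookkeeping: values[k] is the k-th stored entry, indices[k] its column
# (the feature index from vocab), and indptr[i]:indptr[i+1] delimits row i.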
# XXX we could change values to an array.array as well, but it
# would require (heuristic) conversion of dtype to typecode...
values = []
# collect all the possible feature names and build sparse matrix at
# same time
for x in X:
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
if f in vocab:
indices.append(vocab[f])
values.append(dtype(v))
else:
if fitting:
feature_names.append(f)
vocab[f] = len(vocab)
indices.append(vocab[f])
values.append(dtype(v))
indptr.append(len(indices))
if len(indptr) == 1:
raise ValueError("Sample sequence X is empty.")
indices = frombuffer_empty(indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
shape = (len(indptr) - 1, len(vocab))
result_matrix = sp.csr_matrix((values, indices, indptr),
shape=shape, dtype=dtype)
# Sort everything if asked
if fitting and self.sort:
feature_names.sort()
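# Build a column permutation mapping each sorted feature name to the column it
# was assigned during the pass above, updating vocab to the new indices in place.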
map_index = np.empty(len(feature_names), dtype=np.int32)
for new_val, f in enumerate(feature_names):
map_index[new_val] = vocab[f]
vocab[f] = new_val
result_matrix = result_matrix[:, map_index]
if self.sparse:
result_matrix.sort_indices()
else:
result_matrix = result_matrix.toarray()
if fitting:
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return result_matrix
def fit_transform(self, X, y=None):
"""Learn a list of feature name -> indices mappings and transform X.
Like fit(X) followed by transform(X), but does not require
materializing X in memory.
Parameters
----------
X : Mapping or iterable over Mappings
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
return self._transform(X, fitting=True)
def inverse_transform(self, X, dict_type=dict):
"""Transform array or sparse matrix X back to feature mappings.
X must have been produced by this DictVectorizer's transform or
fit_transform method; it may only have passed through transformers
that preserve the number of features and their order.
In the case of one-hot/one-of-K coding, the constructed feature
names and values are returned rather than the original ones.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Sample matrix.
dict_type : callable, optional
Constructor for feature mappings. Must conform to the
collections.Mapping API.
Returns
-------
D : list of dict_type objects, length = n_samples
Feature mappings for the samples in X.
"""
# COO matrix is not subscriptable
X = check_array(X, accept_sparse=['csr', 'csc'])
n_samples = X.shape[0]
names = self.feature_names_
dicts = [dict_type() for _ in xrange(n_samples)]
if sp.issparse(X):
for i, j in zip(*X.nonzero()):
dicts[i][names[j]] = X[i, j]
else:
for i, d in enumerate(dicts):
for j, v in enumerate(X[i, :]):
if v != 0:
d[names[j]] = X[i, j]
return dicts
def transform(self, X, y=None):
"""Transform feature->value dicts to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
X : Mapping or iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
y : (ignored)
Returns
-------
Xa : {array, sparse matrix}
Feature vectors; always 2-d.
"""
if self.sparse:
return self._transform(X, fitting=False)
else:
dtype = self.dtype
vocab = self.vocabulary_
X = _tosequence(X)
Xa = np.zeros((len(X), len(vocab)), dtype=dtype)
for i, x in enumerate(X):
for f, v in six.iteritems(x):
if isinstance(v, six.string_types):
f = "%s%s%s" % (f, self.separator, v)
v = 1
try:
Xa[i, vocab[f]] = dtype(v)
except KeyError:
pass
return Xa
def get_feature_names(self):
"""Returns a list of feature names, ordered by their indices.
If one-of-K coding is applied to categorical features, this will
include the constructed feature names but not the original ones.
"""
return self.feature_names_
def restrict(self, support, indices=False):
"""Restrict the features to those in support using feature selection.
This function modifies the estimator in-place.
Parameters
----------
support : array-like
Boolean mask or list of indices (as returned by the get_support
member of feature selectors).
indices : boolean, optional
Whether support is a list of indices.
Returns
-------
self
Examples
--------
>>> from sklearn.feature_extraction import DictVectorizer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> v = DictVectorizer()
>>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
>>> X = v.fit_transform(D)
>>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
>>> v.get_feature_names()
['bar', 'baz', 'foo']
>>> v.restrict(support.get_support()) # doctest: +ELLIPSIS
DictVectorizer(dtype=..., separator='=', sort=True,
sparse=True)
>>> v.get_feature_names()
['bar', 'foo']
"""
if not indices:
support = np.where(support)[0]
names = self.feature_names_
new_vocab = {}
for i in support:
new_vocab[names[i]] = len(new_vocab)
self.vocabulary_ = new_vocab
self.feature_names_ = [f for f, i in sorted(six.iteritems(new_vocab),
key=itemgetter(1))]
return self
| bsd-3-clause |
buenrostrolab/proatac | proatac/bin/python/py3_makeVvec.py | 1 | 4227 | #!/usr/bin/env python
# Author: Jason Buenrostro, Stanford University
# modified from plotV_vC.py (Alicia)
# Will make a V-plot from bed regions
##### IMPORT MODULES #####
# import necessary for python
import os
import sys
import numpy as np
import pysam
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Pool
from optparse import OptionParser
import random
#### OPTIONS ####
# read options from command line
opts = OptionParser()
usage = "usage: %prog [options] [inputs]"
opts = OptionParser(usage=usage)
opts.add_option("-a", help="<Reads> Accepts sorted BAM file")
opts.add_option("-b", help="<Bed> Accepts bed file")
opts.add_option("-o",help="OutputFile")
opts.add_option("-e",default="1000", help="number of bases to extend to each side, default=1000")
opts.add_option("-p",default="center", help="options:center,ends, default=center")
opts.add_option("-q",help = "Outputplot")
opts.add_option("-c",default="4",help="number of threads to use, default=20")
opts.add_option("-s",default='4',help="column in which strand information is located (1 being first), default=4")
opts.add_option("--window",default='20',help="window size for ploting")
options, arguments = opts.parse_args()
# return usage information if no argvs given
if len(sys.argv)==1:
os.system(sys.argv[0]+" --help")
sys.exit()
##### DEFINE FUNCTIONS #####
# assign mat
def asn_mat(val,mat,s_int,e_int,t,i,weight):
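# Adds `weight` to matrix cell (t, base): t is the fragment-size row and base the
# position of `val` within the [s_int, e_int) window; when the BED file carries
# strand information (more than 3 columns), the column is mirrored for
# minus-strand intervals.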
if float(val)>=s_int and float(val)<e_int-1 and t<rows: # -1 correct?
base = val-s_int
if len(p1_ints[0]) == 3:
mat[t][base] += weight
elif p1_ints[i][int(options.s)-1] == "-":
mat[t][len(mat[0])-base-1] += weight
else:
mat[t][base] += weight
return mat
#compute Vplot Matrix for a particular chunk
def sub_Mat(start):
# initialize data matrix
mat = np.zeros([rows,cols])
# loop through the intervals and get relevent info
bamfile = pysam.Samfile(options.a, "rb")
end=min(start+chunksize,len(p1_ints))
for i in range(start,end):
# get interval as num
center = int(p1_ints[i][1])+(int(p1_ints[i][2])-int(p1_ints[i][1]))//2
s_int=center-int(options.e)
e_int=center+int(options.e)
# loop through rds
for p2_rds in bamfile.fetch(str(p1_ints[i][0]), max(0,s_int-2000), e_int+2000):
#check mapping quality
if p2_rds.mapq<30:# or p2_rds.is_proper_pair==False:
continue
# get read positions
if p2_rds.is_reverse:
continue
else:
l_pos = p2_rds.pos+4
# calculate center point
ilen = abs(p2_rds.tlen)-9
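# The +4 shift above and the -9 here look like the usual Tn5 insertion-site
# correction (+4 on the plus strand, -5 on the minus strand); this is an
# assumption about intent, not documented in the script.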
#ilen = 1
r_pos=l_pos+ilen
c_pos=l_pos+ilen//2
if ilen%2==1 and options.p=='center':
mat=asn_mat(c_pos,mat,s_int,e_int,ilen,i,0.5)
mat=asn_mat(c_pos+1,mat,s_int,e_int,ilen,i,0.5)
elif ilen%2!=1 and options.p=='center':
mat=asn_mat(c_pos,mat,s_int,e_int,ilen,i,1)
# save ends or read centers to v-plot
elif options.p == 'ends':
mat = asn_mat(l_pos,mat,s_int,e_int,ilen,i,1)
mat = asn_mat(r_pos,mat,s_int,e_int,ilen,i,1)
else:
sys.exit('Error, check parameters')
return mat
##### INPUTS AND OUTPUTS #####
# get intervals
p1_ints = np.loadtxt(options.b, dtype=bytes).astype(str)
##### SCRIPT #####
# open and read BAM file
# determine number of rows and columns for matrix
rows = 1000
cols = int(options.e)*2
#cols = int(p1_ints[0][2])-int(p1_ints[0][1])+int(options.e)*2
# split bedfile into chunks
maxi=len(p1_ints)
chunksize=maxi//int(options.c)
starts=range(0,int(chunksize)*int(options.c)-1,int(chunksize))
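# Each worker in the pool below computes a partial V-plot matrix for one chunk of
# intervals; the per-chunk matrices are summed into a single matrix afterwards.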
# parallel processed computation of matrix for each chunk
if __name__ == "__main__":
pool = Pool(processes=int(options.c))
sub_mats=pool.map(sub_Mat, starts, 1)
# sum up matrices for each chunk into matrix for all
mat = np.zeros([rows,cols])
for i in range(len(starts)):
mat=mat+sub_mats[i]
mat = np.sum(mat,0)
np.savetxt(options.o, np.column_stack((np.arange(-2000,2000),np.array(mat))),delimiter=',',fmt='%s')
fig=plt.figure(figsize=(8.0, 5.0))
xran=min(500,int(options.e))
yran=min(500,rows)
plt.plot(mat/np.mean(mat[1:200]),'k.')
plt.plot(np.convolve(mat,np.ones(int(options.window)),'same')/int(options.window)/np.mean(mat[1:200]),'r')
plt.xlabel('Position to TSS')
plt.ylabel('Insertions')
fig.savefig(options.q)
plt.close(fig) | mit |
davidgbe/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features (10%) impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
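# Sweep the regularization strength over six orders of magnitude (1e-5 to 10)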
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/finance.py | 10 | 42914 | """
A collection of functions for collecting, analyzing and plotting
financial data.
This module is deprecated in 2.0 and has been moved to a module called
`mpl_finance`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import contextlib
import os
import warnings
from six.moves.urllib.request import urlopen
import datetime
import numpy as np
from matplotlib import colors as mcolors, verbose, get_cachedir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable, mkdirs, warn_deprecated
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
warn_deprecated(
since=2.0,
message=("The finance module has been deprecated in mpl 2.0 and will "
"be removed in mpl 2.2. Please use the module mpl_finance "
"instead."))
if six.PY3:
import hashlib
def md5(x):
return hashlib.md5(x.encode())
else:
from hashlib import md5
cachedir = get_cachedir()
# cachedir will be None if there is no writable directory.
if cachedir is not None:
cachedir = os.path.join(cachedir, 'finance.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not caching finance data.
cachedir = None
stock_dt_ohlc = np.dtype([
(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('close'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
stock_dt_ochl = np.dtype(
[(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('close'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, close, high, low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=True)
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=False)
def _parse_yahoo_historical(fh, adjusted=True, asobject=False,
ochl=True):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
or
d, open, close, high, low, volume
depending on `ochl`
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
ochl : bool
Selects between ochl and ohlc ordering.
Defaults to True to preserve original functionality.
"""
if ochl:
stock_dt = stock_dt_ochl
else:
stock_dt = stock_dt_ohlc
results = []
# datefmt = '%Y-%m-%d'
fh.readline() # discard heading
for line in fh:
vals = line.split(',')
if len(vals) != 7:
continue # add warning?
datestr = vals[0]
#dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
# Using strptime doubles the runtime. With the present
# format, we don't need it.
dt = datetime.date(*[int(val) for val in datestr.split('-')])
dnum = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = float(vals[5])
aclose = float(vals[6])
if ochl:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, close, high, low, volume, aclose))
else:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, high, low, close, volume, aclose))
results.reverse()
d = np.array(results, dtype=stock_dt)
if adjusted:
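# Rescale prices by S = adjusted_close / close (see docstring); volume is left
# untouched because Yahoo already split-adjusts it.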
scale = d['aclose'] / d['close']
scale[np.isinf(scale)] = np.nan
d['open'] *= scale
d['high'] *= scale
d['low'] *= scale
d['close'] *= scale
if not asobject:
# 2-D sequence; formerly list of tuples, now ndarray
ret = np.zeros((len(d), 6), dtype=np.float)
ret[:, 0] = d['d']
if ochl:
ret[:, 1] = d['open']
ret[:, 2] = d['close']
ret[:, 3] = d['high']
ret[:, 4] = d['low']
else:
ret[:, 1] = d['open']
ret[:, 2] = d['high']
ret[:, 3] = d['low']
ret[:, 4] = d['close']
ret[:, 5] = d['volume']
if asobject is None:
return ret
return [tuple(row) for row in ret]
return d.view(np.recarray) # Close enough to former Bunch return
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,
dividends=False):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are date or datetime instances, or (year, month, day) sequences.
Parameters
----------
ticker : str
ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str
cachename is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
dividends : bool
set dividends=True to return dividends instead of price data. With
this option set, parse functions will not work
Returns
-------
file_handle : file handle
a file handle is returned
Examples
--------
>>> fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))
"""
ticker = ticker.upper()
if iterable(date1):
d1 = (date1[1] - 1, date1[2], date1[0])
else:
d1 = (date1.month - 1, date1.day, date1.year)
if iterable(date2):
d2 = (date2[1] - 1, date2[2], date2[0])
else:
d2 = (date2.month - 1, date2.day, date2.year)
if dividends:
g = 'v'
verbose.report('Retrieving dividends instead of prices')
else:
g = 'd'
urlFmt = ('http://ichart.yahoo.com/table.csv?a=%d&b=%d&' +
'c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv')
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker, g)
# Cache the finance data if cachename is supplied, or there is a writable
# cache directory.
if cachename is None and cachedir is not None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if cachename is not None:
if os.path.exists(cachename):
fh = open(cachename)
verbose.report('Using cachefile %s for '
'%s' % (cachename, ticker))
else:
mkdirs(os.path.abspath(os.path.dirname(cachename)))
with contextlib.closing(urlopen(url)) as urlfh:
with open(cachename, 'wb') as fh:
fh.write(urlfh.read())
verbose.report('Saved %s data to cache file '
'%s' % (ticker, cachename))
fh = open(cachename, 'r')
return fh
else:
return urlopen(url)
def quotes_historical_yahoo_ochl(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ochl('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=True)
def quotes_historical_yahoo_ohlc(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ohlc('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=False)
def _quotes_historical_yahoo(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None,
ochl=True):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
ochl: bool
temporary argument to select between ochl and ohlc ordering
Examples
--------
>>> sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
# Maybe enable a warning later as part of a slow transition
# to using None instead of False.
#if asobject is False:
# warnings.warn("Recommend changing to asobject=None")
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try:
ret = _parse_yahoo_historical(fh, asobject=asobject,
adjusted=adjusted, ochl=ochl)
if len(ret) == 0:
return None
except IOError as exc:
warnings.warn('fh failure\n%s' % (exc.strerror[1]))
return None
return ret
def plot_day_summary_oclh(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=True)
def plot_day_summary_ohlc(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=False)
def _plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
ochl=True
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
# unfortunately this has a different return type than plot_day_summary2_*
lines = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
else:
color = colordown
vline = Line2D(xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick_ochl(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=True)
def candlestick_ohlc(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=False)
def _candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0, ochl=True):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width / 2.0
lines = []
patches = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
lower = open
height = close - open
else:
color = colordown
lower = close
height = open - close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy=(t - OFFSET, lower),
width=width,
height=height,
facecolor=color,
edgecolor=color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
def _check_input(opens, closes, highs, lows, miss=-1):
"""Checks that *opens*, *highs*, *lows* and *closes* have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
miss : int
identifier of the missing data
Raises
------
ValueError
if the input sequences don't have the same length
"""
def _missing(sequence, miss=-1):
"""Returns the index in *sequence* of the missing data, identified by
*miss*
Parameters
----------
sequence :
sequence to evaluate
miss :
identifier of the missing data
Returns
-------
where_miss: numpy.ndarray
indices of the missing data
"""
return np.where(np.array(sequence) == miss)[0]
same_length = len(opens) == len(highs) == len(lows) == len(closes)
_missopens = _missing(opens)
same_missing = ((_missopens == _missing(highs)).all() and
(_missopens == _missing(lows)).all() and
(_missopens == _missing(closes)).all())
if not (same_length and same_missing):
msg = ("*opens*, *highs*, *lows* and *closes* must have the same"
" length. NOTE: this code assumes if any value open, high,"
" low, close is missing (*-1*) they all must be missing.")
raise ValueError(msg)
def plot_day_summary2_ochl(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, close, high, low, as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
return plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize,
colorup, colordown)
def plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
*opens*, *highs*, *lows* and *closes* must have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
_check_input(opens, highs, lows, closes)
rangeSegments = [((i, low), (i, high)) for i, low, high in
zip(xrange(len(lows)), lows, highs) if low != -1]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [((-ticksize, 0), (0, 0))]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [((0, 0), (ticksize, 0))]
offsetsOpen = [(i, open) for i, open in
zip(xrange(len(opens)), opens) if open != -1]
offsetsClose = [(i, close) for i, close in
zip(xrange(len(closes)), closes) if close != -1]
scale = ax.figure.dpi * (1.0 / 72.0)
tickTransform = Affine2D().scale(scale, 0.0)
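# The tick segments are defined in points at the origin; this transform scales
# them from points to pixels, while offsets (transOffset=ax.transData) place each
# tick at its (i, open/close) data location.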
colorup = mcolors.to_rgba(colorup)
colordown = mcolors.to_rgba(colordown)
colord = {True: colorup, False: colordown}
colors = [colord[open < close] for open, close in
zip(opens, closes) if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors=colors,
linewidths=lw,
antialiaseds=useAA,
)
openCollection = LineCollection(openSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsOpen,
transOffset=ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsClose,
transOffset=ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2_ochl(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
Preserves the original argument order.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
colorup=colorup, colordown=colordown,
alpha=alpha)
def candlestick2_ohlc(ax, opens, highs, lows, closes, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
NOTE: this code assumes if any value open, low, high, close is
missing they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
_check_input(opens, highs, lows, closes)
delta = width / 2.
barVerts = [((i - delta, open),
(i - delta, close),
(i + delta, close),
(i + delta, open))
for i, open, close in zip(xrange(len(opens)), opens, closes)
if open != -1 and close != -1]
rangeSegments = [((i, low), (i, high))
for i, low, high in zip(xrange(len(lows)), lows, highs)
if low != -1]
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors=((0, 0, 0, 1), ),
linewidths=lw,
antialiaseds=useAA,
)
barCollection = PolyCollection(barVerts,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=useAA,
linewidths=lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(barCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
a sequence of opens
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
delta = width / 2.
bars = [((i - delta, 0), (i - delta, v), (i + delta, v), (i + delta, 0))
for i, v in enumerate(volumes)
if v != -1]
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=(0,),
linewidths=(0.5,),
)
ax.add_collection(barCollection)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
nb: first point is not displayed - it is used only for choosing the
right color
Parameters
----------
ax : `Axes`
an Axes instance to plot to
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
return volume_overlay(ax, closes[:-1], closes[1:], volumes[1:],
colorup, colordown, width, alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. quotes is a list of (d,
open, high, low, close, volume) and the change between successive
closes is used to determine the color of the bar
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
width : int
the bar width in points
colorup : color
the color of the lines where close1 >= close0
colordown : color
the color of the lines where close1 < close0
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
dates, opens, highs, lows, closes, volumes = list(zip(*quotes))
colors = [colord[close1 >= close0]
for close0, close1 in zip(closes[:-1], closes[1:])
if close0 != -1 and close1 != -1]
colors.insert(0, colord[closes[0] >= opens[0]])
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, volume), (right, volume), (right, 0))
for d, open, high, low, close, volume in quotes]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
dates = [d for d, open, high, low, close, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1),),
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, high, low, close, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.bounds
#print 'viewlim', ax.viewLim.bounds
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""Add a bar collection graph with height vals (-1 is missing).
Parameters
----------
ax : `Axes`
an Axes instance to plot to
vals : sequence
a sequence of values
facecolor : color
the color of the bar face
edgecolor : color
the color of the bar edges
width : int
the bar width in points
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
        The `barCollection` added to the axes
"""
facecolors = (mcolors.to_rgba(facecolor, alpha),)
edgecolors = (mcolors.to_rgba(edgecolor, alpha),)
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, v), (right, v), (right, 0))
for v in vals if v != -1]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
offsetsBars = [(i, 0) for i, v in enumerate(vals) if v != -1]
barCollection = PolyCollection(bars,
facecolors=facecolors,
edgecolors=edgecolors,
antialiaseds=(0,),
linewidths=(0.5,),
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
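# A minimal usage sketch for index_bar, assuming a short list of values where
# -1 marks a missing bar; the helper name and values are illustrative only.
def _demo_index_bar():
    import matplotlib.pyplot as plt
    vals = [10, 12, -1, 15, 9, 11]
    fig, ax = plt.subplots()
    index_bar(ax, vals, facecolor='b', edgecolor='k', width=4, alpha=0.8)
    plt.show()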
| mit |
sho-87/python-machine-learning | CNN/mw/2_eeg_mw_sd.py | 1 | 11487 | from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from tqdm import tqdm
from lasagne.layers import InputLayer, Conv2DLayer, Pool2DLayer
VERBOSE = False
def bootstrap(data, labels, boot_type="downsample"):
print("Bootstrapping data...")
ot_class = 0
mw_class = 1
ot_idx = np.where(labels == ot_class)
mw_idx = np.where(labels == mw_class)
# Get OT examples
ot_data = data[ot_idx]
ot_labels = labels[ot_idx]
print(" - OT (class: {}) | Data: {} | Labels: {}".format(ot_class, ot_data.shape, ot_labels.shape))
# Get MW examples
mw_data = data[mw_idx]
mw_labels = labels[mw_idx]
print(" - MW (class: {}) | Data: {} | Labels: {}".format(mw_class, mw_data.shape, mw_labels.shape))
# Set majority and minority classes
if ot_data.shape[0] > mw_data.shape[0]:
maj_class, maj_data, maj_labels = ot_class, ot_data, ot_labels
min_class, min_data, min_labels = mw_class, mw_data, mw_labels
else:
maj_class, maj_data, maj_labels = mw_class, mw_data, mw_labels
min_class, min_data, min_labels = ot_class, ot_data, ot_labels
print(" - Majority class: {} (N = {}) | Minority class: {} (N = {})".format(maj_class, maj_data.shape[0],
min_class, min_data.shape[0]))
# Upsample minority class
if boot_type == "upsample":
print("Upsampling minority class...")
num_to_boot = maj_data.shape[0] - min_data.shape[0]
print(" - Number to upsample: {}".format(num_to_boot))
bootstrap_idx = np.random.randint(min_data.shape[0], size=num_to_boot)
min_data_boot = min_data[bootstrap_idx]
min_labels_boot = min_labels[bootstrap_idx]
final_data = np.concatenate((data, min_data_boot), axis=0)
final_labels = np.concatenate((labels, min_labels_boot), axis=0)
elif boot_type == "downsample":
print("Downsampling majority class...")
# Resample N = number of minority examples
num_to_boot = min_data.shape[0]
bootstrap_idx = np.random.randint(maj_data.shape[0], size=num_to_boot)
maj_data_boot = maj_data[bootstrap_idx]
maj_labels_boot = maj_labels[bootstrap_idx]
final_data = np.concatenate((maj_data_boot, min_data), axis=0)
final_labels = np.concatenate((maj_labels_boot, min_labels), axis=0)
print("Final class balance: {} ({}) - {} ({})".format(
maj_class, len(np.where(final_labels==maj_class)[0]),
min_class, len(np.where(final_labels==min_class)[0])))
return final_data, final_labels
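# A small illustrative call of bootstrap() on tiny synthetic arrays (the shapes
# and class counts below are placeholders, much smaller than the real EEG data):
# after "downsample", both classes are reduced to the minority count of 3.
def _demo_bootstrap():
    toy_data = np.random.randn(10, 1, 4, 8)
    toy_labels = np.array([0] * 7 + [1] * 3)
    balanced_data, balanced_labels = bootstrap(toy_data, toy_labels, "downsample")
    print(balanced_data.shape, np.bincount(balanced_labels))  # (6, 1, 4, 8) [3 3]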
# Load EEG data
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full.npy'))
data = data.reshape(-1, 1, 64, 512)
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_labels.npy'))
data_labels = data_labels[:,1]
# Standardize data per trial
# Significantly improves gradient descent
data = (data - data.mean(axis=(2,3),keepdims=1)) / data.std(axis=(2,3),keepdims=1)
# Up/downsample the data to balance classes
data, data_labels = bootstrap(data, data_labels, "downsample")
# Create train, validation, test sets
indices = np.random.permutation(data.shape[0])
split_train, split_val, split_test = .6, .2, .2
split_train = int(round(data.shape[0]*split_train))
split_val = split_train + int(round(data.shape[0]*split_val))
train_idx = indices[:split_train]
val_idx = indices[split_train:split_val]
test_idx = indices[split_val:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
test_data = data[test_idx,:]
test_labels = data_labels[test_idx]
def build_cnn(input_var=None):
# Input layer, as usual:
l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)
l_conv1 = Conv2DLayer(incoming = l_in, num_filters = 8, filter_size = (3,3),
stride = 1, pad = 'same', W = lasagne.init.Normal(std = 0.02),
nonlinearity = lasagne.nonlinearities.very_leaky_rectify)
l_pool1 = Pool2DLayer(incoming = l_conv1, pool_size = 2, stride = 2)
# A fully-connected layer
l_fc = lasagne.layers.DenseLayer(
l_pool1,
num_units=512,
nonlinearity=lasagne.nonlinearities.rectify)
l_out = lasagne.layers.DenseLayer(
l_fc,
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
# tqdm() can be removed if no visual progress bar is needed
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
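# A minimal usage sketch of iterate_minibatches on small random arrays (shapes
# are illustrative): with 100 samples and batchsize=32 it yields 3 batches and
# drops the remaining 4 samples, as noted in the comment above. For data that
# does not fit in memory, the arrays could instead come from
# np.load(some_path, mmap_mode='r'), where some_path is supplied by the caller.
def _demo_iterate_minibatches():
    toy_inputs = np.random.randn(100, 3).astype(np.float32)
    toy_targets = np.random.randint(0, 2, size=100).astype(np.int32)
    for batch_inputs, batch_targets in iterate_minibatches(toy_inputs, toy_targets,
                                                           32, shuffle=True):
        print(batch_inputs.shape, batch_targets.shape)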
def main(model='cnn', batch_size=500, num_epochs=500):
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
network = build_cnn(input_var)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.001)
#updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
training_hist = []
val_hist = []
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
print("Training epoch {}...".format(epoch+1))
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
inputs, targets = batch
err, acc = train_fn(inputs, targets)
train_err += err
train_acc += acc
train_batches += 1
if VERBOSE:
print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
epoch+1,
train_batches,
train_data.shape[0]/batch_size,
time.time()-start_time))
training_hist.append(train_err / train_batches)
# And a full pass over the validation data:
print("Validating epoch...")
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
val_hist.append(val_err / val_batches)
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" training accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test predictions/error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Plot learning
plt.plot(range(1, num_epochs+1), training_hist, label="Training")
plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
plt.grid(True)
plt.title("Training Curve")
plt.xlim(1, num_epochs+1)
plt.xlabel("Epoch #")
plt.ylabel("Loss")
plt.legend(loc='best')
plt.show()
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
# Run the model
main(batch_size=100, num_epochs=50) # 66.56%
| mit |
rahul-c1/scikit-learn | sklearn/externals/joblib/__init__.py | 10 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
rishikksh20/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 19 | 40613 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
# Check classification on a toy dataset.
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1, presort=presort)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert_true(np.any(deviance_decrease >= 0.0))
leaves = clf.apply(X)
assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
for presort, loss in product(('auto', True, False),
('deviance', 'exponential')):
yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
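# A sketch of an alternative monitor using the same (i, est, locals) signature;
# the name and the 5-stage plateau window are illustrative only. It could be
# passed to a model as est.fit(X, y, monitor=loss_plateau_monitor).
def loss_plateau_monitor(i, est, locals):
    """Stop once the training loss has not improved over the last 5 stages."""
    if i < 5:
        return False
    recent = est.train_score_[i - 5:i + 1]
    # train_score_ holds the per-stage (in-bag) training loss; lower is better
    return recent[-1] >= recent[0]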
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert_greater(est.score(X, y), 0.96)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
for GBEstimator in all_estimators:
est = GBEstimator(min_impurity_split=0.1).fit(X, y)
for tree in est.estimators_.flat:
assert_equal(tree.min_impurity_split, 0.1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
dense = EstimatorClass(n_estimators=10, random_state=0,
max_depth=2).fit(X, y)
sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort=False).fit(X_sparse, y)
auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
presort='auto').fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
assert_array_almost_equal(sparse.apply(X), auto.apply(X))
assert_array_almost_equal(sparse.predict(X), auto.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
auto.feature_importances_)
assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
if isinstance(EstimatorClass, GradientBoostingClassifier):
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
assert_array_almost_equal(sparse.predict_proba(X),
auto.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
auto.predict_log_proba(X))
assert_array_almost_equal(sparse.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(dense.decision_function(X_sparse),
sparse.decision_function(X))
assert_array_almost_equal(
np.array(sparse.staged_decision_function(X_sparse)),
np.array(sparse.staged_decision_function(X)))
@skip_if_32bit
def test_sparse_input():
ests = (GradientBoostingClassifier, GradientBoostingRegressor)
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for EstimatorClass, sparse_matrix in product(ests, sparse_matrices):
yield check_sparse_input, EstimatorClass, X, sparse_matrix(X), y
| bsd-3-clause |
jayhetee/auto-sklearn | autosklearn/data/competition_data_manager.py | 5 | 16248 | # Functions performing various input/output operations for the ChaLearn AutoML challenge
# Main contributor: Arthur Pesah, August 2014
# Edits: Isabelle Guyon, October 2014
# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED "AS-IS".
# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE
# WARRANTY OF NON-INFRINGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.
# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,
# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.
import numpy as np
import os
import re
import time
import scipy.sparse
try:
import autosklearn.data.competition_c_functions as competition_c_functions
competition_c_functions_is_there = True
except:
competition_c_functions_is_there = False
pass
from autosklearn.data import util as data_util
from autosklearn.data.data_manager import DataManager
from autosklearn.constants import *
def data_dense(filename, feat_type=None, verbose=False):
    # The 2nd parameter makes it possible to use the 3 data-reading
    # functions (data_dense, data_sparse, data_binary_sparse) with the
    # same parameters
# This code is based on scipy.io.arff.arff_load
r_comment = re.compile(r'^%')
# Match an empty line
r_empty = re.compile(r'^\s+$')
descr = [(str(i), np.float32) for i in range(len(feat_type))]
def generator(row_iter, delim=','):
# Copied from scipy.io.arff.arffread
raw = next(row_iter)
while r_empty.match(raw) or r_comment.match(raw):
raw = next(row_iter)
# 'compiling' the range since it does not change
# Note, I have already tried zipping the converters and
# row elements and got slightly worse performance.
elems = list(range(len(feat_type)))
row = raw.split(delim)
# yield tuple([np.float64(row[i]) for i in elems])
yield tuple([row[i] for i in elems])
for raw in row_iter:
while r_comment.match(raw) or r_empty.match(raw):
raw = next(row_iter)
row = raw.split(delim)
# yield tuple([np.float64(row[i]) for i in elems])
yield tuple([row[i] for i in elems])
with open(filename) as fh:
a = generator(fh, delim=" ")
# No error should happen here: it is a bug otherwise
data = np.fromiter(a, descr)
data = data.view(np.float32).reshape((len(data), -1))
return data
def data_sparse(filename, feat_type):
# This function takes as argument a file representing a sparse matrix
# sparse_matrix[i][j] = "a:b" means matrix[i][a] = b
# It converts it into a numpy array, using sparse_list_to_array function,
# and returns this array
sparse_list = sparse_file_to_sparse_list(filename)
return sparse_list_to_csr_sparse(sparse_list, len(feat_type))
def data_binary_sparse(filename, feat_type):
# This function takes as an argument a file representing a binary sparse
# matrix
# binary_sparse_matrix[i][j] = a means matrix[i][j] = 1
    # It converts it into a numpy array and returns this array.
inner_data = file_to_array(filename)
nbr_samples = len(inner_data)
# the construction is easier w/ dok_sparse
dok_sparse = scipy.sparse.dok_matrix((nbr_samples, len(feat_type)))
print ("Converting {} to dok sparse matrix".format(filename))
for row in range(nbr_samples):
for feature in inner_data[row]:
dok_sparse[row, int(feature) - 1] = 1
print ("Converting {} to csr sparse matrix".format(filename))
return dok_sparse.tocsr()
def file_to_array(filename, verbose=False):
# Converts a file to a list of list of STRING; It differs from
# np.genfromtxt in that the number of columns doesn't need to be constant
data = []
with open(filename, "r") as data_file:
if verbose:
print ("Reading {}...".format(filename))
lines = data_file.readlines()
if verbose:
print ("Converting {} to correct array...".format(filename))
data = [lines[i].strip().split() for i in range(len(lines))]
return data
def read_first_line(filename):
    # Read first line of file
data = []
with open(filename, "r") as data_file:
line = data_file.readline()
data = line.strip().split()
return data
def sparse_file_to_sparse_list(filename, verbose=True):
# Converts a sparse data file to a sparse list, so that:
# sparse_list[i][j] = (a,b) means matrix[i][a]=b
data_file = open(filename, "r")
if verbose:
print ("Reading {}...".format(filename))
lines = data_file.readlines()
if verbose:
print ("Converting {} to correct array")
data = [lines[i].split(' ') for i in range(len(lines))]
if verbose:
print ("Converting {} to sparse list".format(filename))
_converter = lambda a_: (int(a_[0]), np.float32(float(a_[1])))
return [[_converter(data[i][j].rstrip().split(':'))
for j in range(len(data[i])) if data[i][j] != '\n']
for i in range(len(data))]
def sparse_list_to_csr_sparse(sparse_list, nbr_features, verbose=True):
# This function takes as argument a matrix of tuple representing a sparse
# matrix and the number of features.
# sparse_list[i][j] = (a,b) means matrix[i][a]=b
# It converts it into a scipy csr sparse matrix
nbr_samples = len(sparse_list)
# construction easier w/ dok_sparse...
dok_sparse = scipy.sparse.dok_matrix((nbr_samples, nbr_features),
dtype=np.float32)
if verbose:
print ("\tConverting sparse list to dok sparse matrix")
for row in range(nbr_samples):
for column in range(len(sparse_list[row])):
(feature, value) = sparse_list[row][column]
dok_sparse[row, feature - 1] = value
if verbose:
print ("\tConverting dok sparse matrix to csr sparse matrix")
# but csr better for shuffling data or other tricks
return dok_sparse.tocsr()
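# A minimal sketch of the sparse helpers above, using a hand-written two-row
# matrix (feature indices and values are illustrative; note the 1-indexing of
# features in the sparse format).
def _demo_sparse_list_to_csr():
    sparse_list = [[(1, 0.5), (3, 2.0)],   # row 0: columns 0 and 2
                   [(2, 1.0)]]             # row 1: column 1
    csr = sparse_list_to_csr_sparse(sparse_list, 3, verbose=False)
    print(csr.toarray())
    # [[0.5 0.  2. ]
    #  [0.  1.  0. ]]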
class CompetitionDataManager(DataManager):
''' This class aims at loading and saving data easily with a cache and at generating a dictionary (self.info) in which each key is a feature (e.g. : name, format, feat_num,...).
Methods defined here are :
__init__ (...)
x.__init__([(feature, value)]) -> void
        Initialize the info dictionary with the tuples (feature, value) given as argument. It recognizes the type of value (int, string) and assigns value to info[feature]. An unlimited number of tuples can be sent.
getInfo (...)
x.getInfo (filename) -> void
Fill the dictionary with an info file. Each line of the info file must have this format 'feature' : value
The information is obtained from the public.info file if it exists, or inferred from the data files
getInfoFromFile (...)
x.getInfoFromFile (filename) -> void
Fill the dictionary with an info file. Each line of the info file must have this format 'feature' : value
'''
def __init__(self, basename, input_dir, verbose=False, encode_labels=True):
super(CompetitionDataManager, self).__init__()
self.basename = basename
if basename in input_dir:
self.input_dir = input_dir
else:
self.input_dir = input_dir + "/" + basename + "/"
info_file = os.path.join(self.input_dir, basename + '_public.info')
self.getInfo(info_file)
self.feat_type = self.loadType(os.path.join(self.input_dir, basename + '_feat.type'), verbose=verbose)
Xtr = self.loadData(os.path.join(self.input_dir, basename + '_train.data'),
self.info['train_num'], verbose=verbose)
Ytr = self.loadLabel(os.path.join(self.input_dir, basename + '_train.solution'),
self.info['train_num'], verbose=verbose)
Xva = self.loadData(os.path.join(self.input_dir, basename + '_valid.data'),
self.info['valid_num'], verbose=verbose)
Xte = self.loadData(os.path.join(self.input_dir, basename + '_test.data'),
self.info['test_num'], verbose=verbose)
self._data['X_train'] = Xtr
self._data['Y_train'] = Ytr
self._data['X_valid'] = Xva
self._data['X_test'] = Xte
p = os.path.join(self.input_dir, basename + '_valid.solution')
if os.path.exists(p):
try:
self._data['Y_valid'] = self.loadLabel(p,
self.info['valid_num'], verbose=verbose)
except (IOError, OSError):
pass
p = os.path.join(self.input_dir, basename + '_test.solution')
if os.path.exists(p):
try:
                self._data['Y_test'] = self.loadLabel(p,
self.info['test_num'], verbose=verbose)
except (IOError, OSError) as e:
pass
if encode_labels:
self.perform1HotEncoding()
def loadData (self, filename, num_points, verbose=True):
''' Get the data from a text file in one of 3 formats: matrix, sparse, binary_sparse'''
if verbose: print("========= Reading " + filename)
start = time.time()
if 'format' not in self.info:
self.getFormatData(filename)
if competition_c_functions_is_there:
data_func = {'dense': competition_c_functions.read_dense_file,
'sparse': competition_c_functions.read_sparse_file,
'sparse_binary': competition_c_functions.read_sparse_binary_file}
data = data_func[self.info['format']](filename, num_points,
self.info['feat_num'])
if scipy.sparse.issparse(data):
if not np.all(data.indices >= 0):
raise ValueError("Sparse data must be 1-indexed, "
"not 0-indexed.")
else:
data_func = {'dense': data_dense,
'sparse': data_sparse,
'sparse_binary': data_binary_sparse}
data = data_func[self.info['format']](filename, self.feat_type)
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return data
def loadLabel (self, filename, num_points, verbose=True):
''' Get the solution/truth values'''
if verbose: print("========= Reading " + filename)
start = time.time()
# IG: Here change to accommodate the new multiclass label format
if competition_c_functions_is_there:
if self.info['task'] == MULTILABEL_CLASSIFICATION:
# cast into ints
label = (competition_c_functions.read_dense_file_unknown_width(
filename, num_points)).astype(np.int)
elif self.info['task'] == MULTICLASS_CLASSIFICATION:
label = competition_c_functions.read_dense_file_unknown_width(
filename, num_points)
# read the class from the only non zero entry in each line!
# should be ints right away
label = np.where(label != 0)[1]
else:
label = competition_c_functions.read_dense_file_unknown_width(
filename, num_points)
else:
if self.info['task'] == MULTILABEL_CLASSIFICATION:
label = data_util.data(filename)
elif self.info['task'] == MULTICLASS_CLASSIFICATION:
label = data_util.convert_to_num(data_util.data(filename))
else:
label = np.ravel(data_util.data(filename)) # get a column vector
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return label
def loadType (self, filename, verbose=True):
''' Get the variable types'''
if verbose: print("========= Reading " + filename)
start = time.time()
type_list = []
if os.path.isfile(filename):
if competition_c_functions_is_there:
type_list = competition_c_functions.file_to_array(filename,
verbose=False)
else:
type_list = file_to_array(filename, verbose=False)
else:
n=self.info['feat_num']
type_list = [self.info['feat_type']]*n
type_list = np.array(type_list).ravel()
end = time.time()
if verbose: print( "[+] Success in %5.2f sec" % (end - start))
return type_list
def getInfo (self, filename, verbose=True):
''' Get all information {attribute = value} pairs from the filename (public.info file),
if it exists, otherwise, output default values'''
if filename is None:
basename = self.basename
input_dir = self.input_dir
else:
# Split away the _public.info suffix (anyway, I don't know why it's
# there... the dataset name is known from the call)
basename = "_".join(os.path.basename(filename).split('_')[:-1])
input_dir = os.path.dirname(filename)
if os.path.exists(filename):
self.getInfoFromFile (filename)
print "Info file found : " + os.path.abspath(filename)
# Finds the data format ('dense', 'sparse', or 'sparse_binary')
self.getFormatData(os.path.join(input_dir, basename + '_train.data'))
else:
raise NotImplementedError("The user must always provide an info "
"file.")
self.info['task'] = STRING_TO_TASK_TYPES[self.info['task']]
return self.info
def getInfoFromFile (self, filename):
''' Get all information {attribute = value} pairs from the public.info file'''
with open (filename, "r") as info_file:
lines = info_file.readlines()
features_list = list(map(lambda x: tuple(x.strip("\'").split(" = ")), lines))
for (key, value) in features_list:
self.info[key] = value.rstrip().strip("'").strip(' ')
if self.info[key].isdigit(): # if we have a number, we want it to be an integer
self.info[key] = int(self.info[key])
return self.info
def getFormatData(self,filename):
''' Get the data format directly from the data file (in case we do not have an info file)'''
if 'format' in self.info.keys():
return self.info['format']
if 'is_sparse' in self.info.keys():
if self.info['is_sparse'] == 0:
self.info['format'] = 'dense'
else:
if competition_c_functions_is_there:
data = competition_c_functions.read_first_line(filename)
else:
data = data_util.read_first_line(filename)
if ':' in data[0]:
self.info['format'] = 'sparse'
else:
self.info['format'] = 'sparse_binary'
else:
if competition_c_functions_is_there:
data = competition_c_functions.file_to_array(filename)
else:
data = data_util.file_to_array(filename)
if ':' in data[0][0]:
self.info['is_sparse'] = 1
self.info['format'] = 'sparse'
else:
nbr_columns = len(data[0])
for row in range (len(data)):
if len(data[row]) != nbr_columns:
self.info['format'] = 'sparse_binary'
if 'format' not in self.info.keys():
self.info['format'] = 'dense'
self.info['is_sparse'] = 0
return self.info['format']
| bsd-3-clause |
tensorflow/lingvo | lingvo/core/metrics.py | 1 | 17161 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for computing performance metrics."""
import collections
import lingvo.compat as tf
from lingvo.core import hyperparams
from lingvo.core import plot
from lingvo.core import py_utils
from lingvo.core import scorers
import numpy as np
try:
import sklearn.metrics # pylint: disable=g-import-not-at-top
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
try:
import scipy.stats # pylint: disable=g-import-not-at-top
HAS_SCIPY_STATS = True
except ImportError:
HAS_SCIPY_STATS = False
def CreateScalarSummary(name, simple_value):
return tf.Summary(
value=[tf.Summary.Value(tag=name, simple_value=simple_value)])
class BaseMetric:
"""Base class for aggregating statistics to compute a performance metric."""
def Update(self, *args, **kwargs):
"""Updates this metric (e.g. accumulates statistics) from the arguments."""
pass
@property
def value(self):
"""Current value of this metric."""
return None
def Summary(self, name):
"""Converts the current state of this metric to a `tf.Summary`.
Args:
name: A string to use as the summary value tag.
Returns:
A `tf.Summary` proto.
"""
return CreateScalarSummary(name, self.value)
class ConfigurableMetric(BaseMetric):
"""A Metric class with configurable params."""
@classmethod
def Params(cls):
p = hyperparams.InstantiableParams(cls)
return p
def __init__(self, params):
self.params = params
class AverageMetric(BaseMetric):
"""Class to compute a weighted (arithmetic) average value metric."""
def __init__(self):
self._total_value = 0.0
self._total_weight = 0.0
def Update(self, value, weight=1.0):
if weight < 0.0:
raise ValueError('weight must be non-negative. Got: %f' % weight)
self._total_value += value * weight
self._total_weight += weight
# We may want both a getter and a setter method for total_value and
# total_weight, respectively.
def GetTotalValue(self):
return self._total_value
def SetTotalValue(self, val):
self._total_value = val
total_value = property(GetTotalValue, SetTotalValue)
def GetTotalWeight(self):
return self._total_weight
def SetTotalWeight(self, val):
self._total_weight = val
total_weight = property(GetTotalWeight, SetTotalWeight)
@property
def value(self):
return (self._total_value /
self._total_weight if self._total_weight > 0 else 0)
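# Illustrative usage (a sketch with hypothetical values): a weighted mean
# accumulated online.
#   m = AverageMetric()
#   m.Update(1.0, weight=3.0)
#   m.Update(5.0)            # weight defaults to 1.0
#   m.value                  # -> (1.0*3.0 + 5.0*1.0) / 4.0 = 2.0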
class F1Metric(BaseMetric):
"""Class to compute F1 metrics."""
def __init__(self):
self._true_pos = 0.0
self._false_pos = 0.0
self._false_neg = 0.0
def UpdateTruePositive(self, count=1.0):
self._true_pos += count
def UpdateFalsePositive(self, count=1.0):
self._false_pos += count
def UpdateFalseNegative(self, count=1.0):
self._false_neg += count
@property
def value(self):
if (self._true_pos + self._false_pos) > 0:
precision = self._true_pos / (self._true_pos + self._false_pos)
else:
precision = 0.0
if (self._true_pos + self._false_neg) > 0:
recall = self._true_pos / (self._true_pos + self._false_neg)
else:
recall = 0.0
if (precision + recall) > 0:
return 2.0 * precision * recall / (precision + recall)
else:
return 0.0
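# Illustrative usage (a sketch with hypothetical counts):
#   f1 = F1Metric()
#   f1.UpdateTruePositive(8.0)
#   f1.UpdateFalsePositive(2.0)   # precision = 8/10
#   f1.UpdateFalseNegative(8.0)   # recall    = 8/16
#   f1.value                      # harmonic mean = 2*0.8*0.5/1.3 ~= 0.615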
class MCCMetric(F1Metric):
"""Class to compute Matthews correlation coefficient metric."""
def __init__(self):
super().__init__()
self._true_neg = 0.0
def UpdateTrueNegative(self, count=1.0):
self._true_neg += count
@property
def value(self):
tp = self._true_pos
tn = self._true_neg
fp = self._false_pos
fn = self._false_neg
denominator = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
if denominator > 0.0:
return (tp * tn - fp * fn) / denominator**0.5
else:
return 0.0
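# Illustrative usage (a sketch with hypothetical counts, not from the original code):
#   mcc = MCCMetric()
#   mcc.UpdateTruePositive(90); mcc.UpdateTrueNegative(80)
#   mcc.UpdateFalsePositive(10); mcc.UpdateFalseNegative(20)
#   mcc.value  # (90*80 - 10*20) / sqrt(100*110*90*100) ~= 0.70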
class CorpusBleuMetric(BaseMetric):
"""Metric class to compute the corpus-level BLEU score."""
def __init__(self, **kwargs):
self._scorer = scorers.BleuScorer(**kwargs)
def Update(self, ref_str, hyp_str):
self._scorer.AddSentence(ref_str, hyp_str)
@property
def unsegmenter(self):
return self._scorer.unsegmenter
@property
def value(self):
return self._scorer.ComputeOverallScore()
class TpuEvalMetrics:
"""Manages computation of metrics during TPU execution.
TPU execution runs a training loop on device. To get eval metrics out of this,
metric values and weights must be carried through the loop. This requires
passing initial values to the loop setup, updating the values during the loop,
and doing a final aggregation after the loop. This class wraps the metrics
dictionary so that the needed ops can be built at the right time as the
training loop is built.
Note that because the model is not constructed until the loop body function is
called, the initial values must be known statically. This is done currently by
hardcoding a limit on the number of metrics and casting each metric and value
count to float32, regardless of the number of actual metrics the model
produced.
Note that this implementation computes the metrics over all replicas, for the
last step of the loop only (could be changed to average over all loop steps
instead).
"""
def __init__(self, max_metrics=256):
self._metrics = None
self._max_metrics = max_metrics
# Loop-carried values alternate value and weight; all values are scalars.
self._initial_values = (2 *
self._max_metrics) * [tf.constant(0, tf.float32)]
def SetMetrics(self, metric_dict, step_args):
"""Sets the metrics to evaluate and the per-step output tensors.
Args:
metric_dict: dict of (name -> (tensor of values, tensor of weights))
step_args: the tensors being passed to the training loop body. These share
the same structure of alternating value and weight scalars as the
initial values and the output of this function.
Returns:
The tensors to return from the training loop body. For entries that are
for metrics in self._metrics, returns the value computed within the loop
(the step_args value passed in); for all others, the value will never be
used at the end and so the step_args value is passed through (which has
the effect of passing the initial values through every iteration of the
loop).
"""
num_metrics = len(metric_dict)
assert num_metrics <= self._max_metrics, ('Increase max_metrics to >= %d' %
num_metrics)
self._metrics = metric_dict
# self._metrics contains a map of (metric_value,
# metric_weight). We convert it into [metric_value *
# metric_weight, metric_weight] to make it easier to aggregate
# metric values across steps and TPU replicas.
ret = []
for _, (value, weight) in sorted(self._metrics.items()):
assert value.shape.is_fully_defined(), ('%s' % value)
assert weight.shape.is_fully_defined(), ('%s' % weight)
weight = tf.cast(weight, tf.float32)
value = tf.cast(value, tf.float32) * weight
ret += [value, weight]
# Each metric has two tensors: value and weight.
assert len(ret) == 2 * num_metrics
ret += list(step_args)[len(ret):]
return ret
@property
def initial_values(self):
"""Returns the initial loop values."""
return self._initial_values
@property
def metrics(self):
return self._metrics
def _Zip(self, values):
assert isinstance(values, list)
return list(zip(values[::2], values[1::2]))
def FinalizeMetrics(self, loop_result):
"""Compute final average of the metrics, given loop_result tensors.
To be called outside the training loop body, but still in the scope of
tpu.batch_parallel.
Args:
loop_result: Result of the training loop.
Returns:
The tensors of the final avg values and total weights.
"""
# Each metric has two tensors in the loop carrying result.
metrics = loop_result[:2 * len(self._metrics.items())]
# Aggregate across tpu replicas.
metrics = [tf.tpu.cross_replica_sum(x) for x in metrics]
ret = []
for (value, weight) in self._Zip(metrics):
value, weight = py_utils.WeightedAvg(
tf.math.divide_no_nan(value, weight), weight)
ret += [value, weight]
return ret
def PackMetricsValues(self, values):
"""Packs numpy values into a dict of metrics."""
for k, v in zip(sorted(self._metrics.keys()), self._Zip(values)):
self._metrics[k] = v
def ToAverageMetrics(self):
"""Wrap the final metric values into AverageMetric objects.
Returns:
A dict that maps metric names to AverageMetric objects.
"""
ret = {}
for name, (value, weight) in self._metrics.items():
avg_metric = AverageMetric()
avg_metric.total_weight = weight
avg_metric.total_value = weight * value
ret[name] = avg_metric
return ret
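# Illustrative call order (a sketch in comments only; the surrounding TPU loop
# construction and session handling are assumed to exist elsewhere):
#   tpu_metrics = TpuEvalMetrics()
#   inits = tpu_metrics.initial_values                  # feed as the loop's initial args
#   # inside the loop body: outs = tpu_metrics.SetMetrics(model_metric_dict, args)
#   final = tpu_metrics.FinalizeMetrics(loop_result)    # still under tpu.batch_parallel
#   tpu_metrics.PackMetricsValues(fetched_final_values) # numpy values -> self.metrics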
class AUCMetric(BaseMetric):
"""Class to compute the AUC score for binary classification."""
def __init__(self, mode='roc', samples=-1):
"""Constructor of the class.
Args:
mode: Possible values: 'roc' or 'pr'.
samples: The number of sample points to compute the AUC. If -1, include
all points seen thus far.
Raises:
ImportError: If user has not installed sklearn, raise an ImportError.
"""
if not HAS_SKLEARN:
raise ImportError('AUCMetric depends on sklearn.')
self._mode = mode
self._samples = samples
self._label = []
self._prob = []
self._weight = []
if self._mode == 'roc':
self._curve_fn = sklearn.metrics.roc_curve
self._score_fn = sklearn.metrics.roc_auc_score
self._plot_labels = ['False Positive Rate', 'True Positive Rate']
elif self._mode == 'pr':
self._curve_fn = sklearn.metrics.precision_recall_curve
self._score_fn = sklearn.metrics.average_precision_score
self._plot_labels = ['Recall', 'Precision']
else:
raise ValueError('mode in AUCMetric must be one of "roc" or "pr".')
def Update(self, label, prob, weight=None):
"""Updates the metrics.
Args:
label: An array to specify the groundtruth binary labels. Values must be
either 0 or 1.
prob: An array to specify the prediction probabilities. Values must be
within [0, 1.0].
weight: An array to specify the sample weight for the auc computation.
"""
self._label += label
self._prob += prob
if weight:
self._weight += weight
else:
self._weight += [1 for _ in range(len(label))]
if self._samples > 0:
self._label = self._label[-self._samples:]
self._prob = self._prob[-self._samples:]
self._weight = self._weight[-self._samples:]
@property
def value(self):
try:
return self._score_fn(self._label, self._prob, sample_weight=self._weight)
except ValueError as exception:
# In case self._label still has just 1 type of label, e.g. all(labels==0).
if 'Only one class present in y_true.' in str(exception):
return 0.0
else:
raise
def Summary(self, name):
def _Setter(fig, axes):
# Ticks every 0.05 between 0 and 1.
ticks = np.arange(0, 1.05, 0.05)
axes.grid(b=True)
axes.set_xlabel(self._plot_labels[0])
axes.set_xticks(ticks)
axes.set_ylabel(self._plot_labels[1])
axes.set_yticks(ticks)
fig.tight_layout()
xs, ys, _ = self._curve_fn(
self._label, self._prob, sample_weight=self._weight)
if self._mode == 'pr':
# Swap because sklearn returns <'precision', 'recall'>.
xs, ys = ys, xs
ret = plot.Curve(name=name, figsize=(12, 12), xs=xs, ys=ys, setter=_Setter)
ret.value.add(tag=name, simple_value=self.value)
return ret
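# Illustrative usage (a sketch with hypothetical labels/probabilities; needs sklearn):
#   auc = AUCMetric(mode='roc')
#   auc.Update(label=[0, 1, 1, 0], prob=[0.1, 0.8, 0.4, 0.3])
#   auc.value  # -> 1.0 here, since every positive outranks every negative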
class CorrelationMetric(BaseMetric):
"""Class to compute correlation."""
def __init__(self, mode='pearson', samples=-1):
"""Constructor of the class.
Args:
mode: Possible values: 'pearson', 'spearman', 'kendalltau'.
samples: The number of sample points to compute the correlation. If -1,
include all points seen thus far.
Raises:
ImportError: If user has not installed scipy.stats, raise an ImportError.
"""
if not HAS_SCIPY_STATS:
raise ImportError('CorrelationMetric depends on scipy.stats.')
assert mode in ['pearson', 'spearman', 'kendalltau']
self._mode = mode
self._samples = samples
self._target = []
self._pred = []
def Update(self, target, pred):
"""Updates the metrics.
Args:
target: An array to specify the groundtruth float target.
pred: An array to specify the prediction.
"""
self._target += target
self._pred += pred
if self._samples > 0:
self._target = self._target[-self._samples:]
self._pred = self._pred[-self._samples:]
@property
def value(self):
# only use the correlation, p-value is ignored.
if self._mode == 'pearson':
return scipy.stats.pearsonr(self._target, self._pred)[0]
elif self._mode == 'spearman':
return scipy.stats.spearmanr(self._target, self._pred)[0]
else:
return scipy.stats.kendalltau(self._target, self._pred)[0]
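# Illustrative usage (a sketch with hypothetical values; needs scipy.stats):
#   corr = CorrelationMetric(mode='spearman')
#   corr.Update(target=[1.0, 2.0, 3.0], pred=[0.9, 2.1, 2.8])
#   corr.value  # -> 1.0, the predictions preserve the target ranking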
class AverageKeyedCorrelationMetric(BaseMetric):
"""Class to compute correlation per key, then report average across all keys."""
def __init__(self, mode='pearson', bypass_nan=True):
"""Constructor of the class.
Args:
mode: Possible values: 'pearson', 'spearman', 'kendalltau'.
bypass_nan: for keys that have only 1 example, the metric will be NaN;
turn on this flag to skip those keys.
Depending on the scipy version, it may raise a ValueError instead
of producing NaN. In this case, the exception is suppressed and the
key is skipped during calculation.
Raises:
ImportError: If user has not installed scipy.stats, raise an ImportError.
"""
if not HAS_SCIPY_STATS:
raise ImportError('CorrelationMetric depends on scipy.stats.')
assert mode in ['pearson', 'spearman', 'kendalltau']
self._mode = mode
self._bypass_nan = bypass_nan
self._target = collections.defaultdict(list)
self._pred = collections.defaultdict(list)
def Update(self, key, target, pred):
"""Updates the metrics.
Args:
key: The key this (target, pred) pair belongs to.
target: An array to specify the groundtruth float target.
pred: An array to specify the prediction.
"""
self._target[key] += target
self._pred[key] += pred
@property
def value(self):
# only use the correlation, p-value is ignored.
if self._mode == 'pearson':
corr_f = scipy.stats.pearsonr
elif self._mode == 'spearman':
corr_f = scipy.stats.spearmanr
else:
corr_f = scipy.stats.kendalltau
results = []
for k in self._target:
target = self._target[k]
pred = self._pred[k]
try:
raw_corr = corr_f(target, pred)[0]
if not self._bypass_nan or not np.isnan(raw_corr):
results.append(raw_corr)
except ValueError:
continue
if not self._bypass_nan or results:
return np.mean(results)
else:
return 0.0
class SamplingMetric(ConfigurableMetric):
"""Sampling metric base class.
Subclasses must implement _CreateSummary(); sampling will be handled
by this base class.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_samples', 8, 'The number of samples to store uniformly.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._sampler = py_utils.UniformSampler(num_samples=p.num_samples)
self._summary = None
@property
def samples(self):
"""Returns an iterable of sampled decoded outputs to compute Summaries."""
return self._sampler.samples
def Update(self, decoded_outputs):
"""Samples the input decoded_outputs NestedMap.
Args:
decoded_outputs: A `.NestedMap`.
"""
self._sampler.Add(decoded_outputs)
# Invalidate cache.
self._summary = None
def Summary(self, name):
if self._summary is None:
self._summary = self._CreateSummary(name)
self._sampler = py_utils.UniformSampler(
num_samples=self.params.num_samples)
return self._summary
def _CreateSummary(self, name):
"""Returns a tf.Summary for this metric."""
raise NotImplementedError()
| apache-2.0 |
ProjectPyRhO/PyRhO | setup.py | 1 | 8313 | """The PyRhO package setup script"""
#from __future__ import print_function # Added for Python 2.x support
from setuptools import setup, find_packages # Prefer setuptools over distutils
from codecs import open # To use a consistent encoding
import os
# Download and install setuptools if not installed
#from ez_setup import use_setuptools
#use_setuptools()
#python -m ensurepip --upgrade
#from setuptools import setup
#from distutils import setup
here = os.path.abspath(os.path.dirname(__file__))
home = os.path.expanduser("~")
print(home)
prwd = os.path.join(home, 'pyrho') # pyrho working directory
# TODO: Test changes to package_data and include notebooks and license without MANIFEST
# TODO: Fix this to remove redundant long_description text
# Get the long description from the relevant file
#with open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
#with open('DESCRIPTION.rst', encoding='utf-8') as f:
# long_description = f.read()
long_description = """
PyRhO - A Virtual Optogenetics Laboratory
=========================================
A Python module to fit and characterise rhodopsin photocurrents.
Background
----------
Optogenetics has become a key tool for understanding the function of neural circuits and controlling their behaviour. An array of directly light-driven opsins has been genetically isolated from several families of organisms, with a wide range of temporal and spectral properties. In order to characterize, understand and apply these rhodopsins, we present an integrated suite of open-source, multi-scale computational tools called PyRhO.
PyRhO enables users to:
(i) characterize new (and existing) rhodopsins by automatically fitting a minimal set of experimental data to three, four or six-state kinetic models,
(ii) simulate these models at the channel, neuron & network levels and
(iii) gain functional insights through model selection and virtual experiments *in silico*.
The module is written in Python with an additional IPython/Jupyter notebook based GUI, allowing models to be fit, simulations to be run and results to be shared through simply interacting with a webpage. The seamless integration of model fitting algorithms with simulation environments for these virtual opsins will enable (neuro)scientists to gain a comprehensive understanding of their behaviour and rapidly identify the most suitable variant for application in a particular biological system. This process may thereby guide not only experimental design and opsin choice but also alterations of the rhodopsin genetic code in a neuro-engineering feed-back loop. In this way, we hope PyRhO will help to significantly improve optogenetics as a tool for transforming biological sciences.
Further Information
-------------------
If you use PyRhO please cite our paper:
Evans, B. D., Jarvis, S., Schultz, S. R. & Nikolic K. (2016) "PyRhO: A Multiscale Optogenetics Simulation Platform", *Frontiers in Neuroinformatics, 10* (8). `doi:10.3389/fninf.2016.00008 <https://dx.doi.org/10.3389/fninf.2016.00008>`_
The PyRhO project website with additional documentation may be found here: `www.imperial.ac.uk/bio-modelling/pyrho <http://www.imperial.ac.uk/a-z-research/bio-modelling/pyrho>`_
Finally, don't forget to follow us on twitter for updates: `@ProjectPyRhO <https://twitter.com/ProjectPyRhO>`_!
"""
setup(
name='PyRhO',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.9.5',
description='Fit and characterise rhodopsin photocurrents',
long_description=long_description,
# The project's main homepage.
url='https://github.com/ProjectPyRhO/PyRhO/',
# download_url='https://github.com/ProjectPyRhO/PyRhO/archive/master.zip',
# download_url='https://github.com/ProjectPyRhO/PyRhO/tarball/' + version,
# Author details
author='Benjamin D. Evans',
author_email='[email protected]',
license='BSD',
platforms=['Linux', 'Mac OS X', 'Windows'],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Artificial Life',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
# The license should match "license" above
'License :: OSI Approved :: BSD License',
# Supported Python versions
'Programming Language :: Python',
'Programming Language :: Python :: 3',
# 3.5 EOL: 13/09/20
'Programming Language :: Python :: 3.5',
# 3.6 EOL: 23/12/21
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: IPython',
'Natural Language :: English',
'Operating System :: OS Independent',
],
#keywords='optogenetics rhodopsin opsin brain neuroscience neuron brian jupyter',
keywords=['optogenetics', 'rhodopsin', 'opsin', 'brain', 'neuroscience',
'neuron', 'brian', 'jupyter'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# package_dir = {'':'.'},
# package_dir = {'pyrho': 'pyrho'}, # Relative to this script
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# ipython is used for latex repr - remove from requirements and have a fallback repr?
install_requires=['numpy>=1.8', 'scipy>=0.15', 'matplotlib>=1.3',
'lmfit>=0.9.3', 'brian2>=2.0'], # 'ipython>=4.1'
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# 'brian' : ['brian2'],
# 'docs' : ['sphinx>=1.3'],
'extras': ['seaborn>=0.7', 'pandas>=0.17'], # 'cython>=0.23'
# traitlets is a dependency of ipywidgets and can be removed if 4.1 entails traitlets>=4.1
'GUI' : ['jupyter>=1.0', 'notebook>=4.1', 'ipywidgets>=4.1,<5',
'seaborn>=0.7'], # , 'traitlets>=4.1,<5'
'full': ['jupyter>=1.0', 'notebook>=4.1', 'ipywidgets>=4.1,<5',
'seaborn>=0.7', 'pandas>=0.17'], # 'cython>=0.23'
},
include_package_data=True,
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# TODO: Try this without MANIFEST
'NEURON' : ['*.mod', '*.hoc', '*.sh'],
'gui' : ['*.png'],
'datasets': ['*.pkl'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
#data_files=[#(prwd, []),
# (prwd, [os.path.join(prwd, 'gui/*.png'), ])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
#entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
)
| bsd-3-clause |
labcem/EIRP | EIRP.py | 1 | 5119 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 26 2015
Full EIRP measurement in the anechoic room, EDF Lab building D16, 3 cutting planes
and 2 polarisations.
[email protected]
"""
from __future__ import division
import time
import visa
import scipy
import os
from numpy import *
import matplotlib.pyplot as plt
#Instruments modules
import Spectrum
from FC06 import * # mast and turntable controller class for the anechoic chamber
nom=raw_input('Enter the name of the equipment: ')
if not os.path.isdir('Results_'+nom):
os.mkdir('Results_'+nom)
#Calibration files
Correction_H=loadtxt('Cal_Pol_H.txt')
Correction_V=loadtxt('Cal_Pol_V.txt')
os.chdir('Results_'+nom)
f=Correction_V[:,0]
###############################################
########## Testing parameters ##############
###############################################
fstart=f[0] #Start frequency
fstop=f[-1] #Stop frequency
fcenter=0.5*(fstart+fstop) #Center frequency
fspan=fstop-fstart #Span
RBW=1e6 #RBW size in Hz
VBW=100e3 #VBW size in Hz
SwpPt=len(f) #Number of points
N=37 #Number of incident angles
Angles=linspace(0,360,N)-180
Pol=2 #Number of polarisations
Exp=1 #Number of cutting planes
Tmes=15 #dwell time
###Stop criterion
###channels center frequencies (european wifi)
##f0=2.412e9
##fn=2.472e9
##n=13 #number of channels
##fc=linspace(f0,fn,n)
###channel center frequencies indexes
##peaksindx=zeros(len(fc))
##for i in range(len(fc)):
# peaksindx[i]=argmin(abs(f-fc[i]))
Level_criterion=-60
print '\nInstruments initialisations\n'
print '\nSpectrum analyser:'
Spectre=Spectrum.FSV30()
Spectre.reset()
Spectre.startFreq(fstart)
Spectre.stopFreq(fstop)
Spectre.RBW(RBW)
Spectre.SweepPoint(SwpPt)
Spectre.MaxHold()
Spectre.UnitDBM()
print u'\nFC-06 Mast and TurnTable Controller'
FC=FC06()
FC.reset()
FC.AngleVel(20)
#FC.hVel(20)
FC.setAngle(0)
print 'Full anechoic chamber, height=1.1 m'
FC.setHauteur(1100)
print '\nMeasurement...\n'
raw_input (u"Place your EUT, first cutting plane, angle 0°, Presse Enter ")
Measurement=ones([Pol,Exp,N,2])*-Inf #Measurement is logarithmic to get 0 after linearisation
Raw_Traces=zeros([Pol,Exp,N,2,SwpPt])
for k in range(Exp):
if k!=0:
print (u"\nBack to 0°")
FC.setAngle(0)
raw_input ("Place your object according to cutting plane %s, Presse Enter " %(k+1))
for l in range (0,Pol):
if l==0:
Polarisation='V'
else:
Polarisation='H'
FC.setPolar(l)
while FC.busy()=="1":
#print("NOK")
time.sleep(0.2)
print("OK")
print("Cutting plane : %i, antenna polarisation : %s " %(k+1,Polarisation))
for j in range(0,len(Angles)):
#print ("Go to %s deg" %(Angles [j]))
FC.setAngle(Angles[j])
Spectre.readwrite()
Spectre.MaxHold()
time.sleep(Tmes)
#raw_input("\n Press Enter to validate the measurement\n")
Level = Spectre.getTrace(SwpPt)
if Polarisation=='V':
cLevel=Level+Correction_V[:,1]
else:
cLevel=Level+Correction_H[:,1]
#criterion automatic stop
#while (min(Level[peaksindx])<Level_criterion): #every channel
#while (min(Level[peaksindx])<Level_criterion): #one channel
#while (mean(Level[peaksindx]<Level_criterion))<p/n: #p channels among n
while (max(Level)<Level_criterion):
Level = Spectre.getTrace(SwpPt)
if Polarisation=='V':
cLevel=Level+Correction_V[:,1]
else:
cLevel=Level+Correction_H[:,1]
time.sleep(Tmes)
Trace=Level
MaxLevel=max(cLevel)
MaxIdx =cLevel.argmax()
Measurement[l,k,j,:]=array([f[MaxIdx],MaxLevel])
Raw_Traces[l,k,j,:]=Trace
print u'%s°, EIRP = %2.2f mW/MHz' %((Angles [j]),10**(MaxLevel/10))
fname = ( '%s_Exp%s.txt') %(Polarisation,k+1)
savetxt(fname,Measurement[l,k,:])
r=sum((10**((Measurement[:,k,:,1])/10)),axis=0)
print "Printing some figures..."
plt.close('all')
plt.polar((Angles*pi/180),r)
Graphlin= 'Graph_Exp%s' %(k+1)
plt.ylabel('EIRP/mW')
plt.title("EIRP in mW, cutting plane %s" %(k+1))
plt.savefig(Graphlin+'.pdf',bbox_inches='tight')
plt.savefig(Graphlin+'.png',bbox_inches='tight')
print (Graphlin+'.pdf')
print (Graphlin+'.png')
plt.close()
print "Raw data saved in file "+nom+'_raw.npz'
savez(nom+'_raw.npz',Measurement=Measurement,Raw_Traces=Raw_Traces,f=f)
print(u"Back to 0° and vertical polarisation ")
FC.setAngle(0)
FC.setPolar(0)
print("OK")
| mit |
lbishal/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sparse_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/datasets/samples_generator.py | 26 | 56554 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for the remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
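Examples
--------
A minimal invocation (a sketch; the drawn values depend on ``random_state``,
only the output shapes are shown):
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=0)
>>> X.shape
(100, 20)
>>> y.shape
(100,)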
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
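# A small sketch of the generative process documented above; with the default
# return_indicator='dense', Y is an (n_samples, n_classes) binary indicator matrix.
# Kept as a comment so module import stays side-effect free.
#
#     from sklearn.datasets.samples_generator import make_multilabel_classification
#     X, Y = make_multilabel_classification(n_samples=10, n_features=25, n_classes=4,
#                                           n_labels=2, random_state=0)
#     X.shape   # (10, 25)
#     Y.shape   # (10, 4) -- each row marks the label set drawn for that document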
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
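# Sketch checking the decision rule stated in the docstring; the labels can be
# recomputed exactly from X because the rule is deterministic.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_hastie_10_2
#     X, y = make_hastie_10_2(n_samples=1000, random_state=0)
#     np.array_equal(y, np.where((X ** 2).sum(axis=1) > 9.34, 1.0, -1.0))   # True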
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
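# Sketch relating the returned coefficients to the targets; with noise=0.0 the
# linear model is exact, so y is reconstructed from X, coef and the bias term.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_regression
#     X, y, w = make_regression(n_samples=50, n_features=10, n_informative=3,
#                               bias=2.0, noise=0.0, coef=True, random_state=0)
#     np.allclose(y, X.dot(w) + 2.0)   # True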
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
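# Sketch of the two-circle toy data; without noise or shuffling, the points lie
# exactly on circles of radius 1 (label 0) and `factor` (label 1).
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_circles
#     X, y = make_circles(n_samples=100, shuffle=False, noise=None, factor=0.5)
#     radii = np.sqrt((X ** 2).sum(axis=1))
#     np.allclose(np.unique(np.round(radii, 6)), [0.5, 1.0])   # True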
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
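# Sketch verifying the Friedman #1 formula quoted in the docstring; with noise=0.0
# the output is an exact function of the first five features.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_friedman1
#     X, y = make_friedman1(n_samples=200, n_features=10, noise=0.0, random_state=0)
#     expected = (10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2
#                 + 10 * X[:, 3] + 5 * X[:, 4])
#     np.allclose(y, expected)   # True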
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
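# Sketch of the singular profile described above; the largest singular value of the
# returned matrix equals the i=0 value of the profile, i.e. exactly 1.0.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_low_rank_matrix
#     X = make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
#                              tail_strength=0.5, random_state=0)
#     s = np.linalg.svd(X, compute_uv=False)
#     np.allclose(s.max(), 1.0)   # True: (1 - tail_strength) + tail_strength at i=0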
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
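# Sketch checking the identity Y = DX stated in the docstring; the three returned
# arrays unpack directly from the map object.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_sparse_coded_signal
#     Y, D, X = make_sparse_coded_signal(n_samples=15, n_components=30, n_features=20,
#                                        n_nonzero_coefs=5, random_state=0)
#     np.allclose(Y, np.dot(D, X))   # True
#     (X != 0).sum(axis=0)           # 5 active coefficients in every column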
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
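# Sketch checking the advertised properties: the result is symmetric and its
# eigenvalues are strictly positive.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_spd_matrix
#     M = make_spd_matrix(n_dim=5, random_state=0)
#     np.allclose(M, M.T)                  # True
#     (np.linalg.eigvalsh(M) > 0).all()    # True (the eigenvalues lie in (1, 2))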
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
    prec : array of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
    # Permute the rows and columns symmetrically: we don't want to have
    # asymmetries in the final SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
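# Sketch for the sparse precision-matrix generator; larger alpha zeroes out more of
# the Cholesky factor, and the result stays symmetric positive-definite.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_sparse_spd_matrix
#     prec = make_sparse_spd_matrix(dim=10, alpha=0.95, random_state=0)
#     np.allclose(prec, prec.T)                # True
#     (np.linalg.eigvalsh(prec) > 0).all()     # True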
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
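# Sketch showing the near-equal class sizes produced by the quantile labelling;
# shuffling does not change the per-class counts, so the result is deterministic.
#
#     import numpy as np
#     from sklearn.datasets.samples_generator import make_gaussian_quantiles
#     X, y = make_gaussian_quantiles(n_samples=100, n_features=2, n_classes=3,
#                                    random_state=0)
#     np.bincount(y)   # array([33, 33, 34]) -- the last class absorbs the remainder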
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
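# Sketch of the bicluster output layout; `rows` and `cols` are boolean indicator
# arrays with one row per bicluster.
#
#     from sklearn.datasets.samples_generator import make_biclusters
#     data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=4, noise=0.5,
#                                        shuffle=True, random_state=0)
#     data.shape   # (30, 20)
#     rows.shape   # (4, 30)
#     cols.shape   # (4, 20)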
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
eramirem/astroML | book_figures/chapter1/fig_S82_hess.py | 3 | 2146 | """
SDSS Stripe 82 Hess Diagram
---------------------------
Figure 1.10.
A Hess diagram of the r-i vs. g-r colors for the entire set of SDSS Stripe 82
standard stars. The pixels are colored with a logarithmic scaling;
cf. figures 1.6 and 1.9.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_S82standards
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the stripe 82 data
data = fetch_sdss_S82standards()
g = data['mmu_g']
r = data['mmu_r']
i = data['mmu_i']
#------------------------------------------------------------
# Compute and plot the 2D histogram
H, xbins, ybins = np.histogram2d(g - r, r - i,
bins=(np.linspace(-0.5, 2.5, 50),
np.linspace(-0.5, 2.5, 50)))
# Create a black and white color map where bad data (NaNs) are white
cmap = plt.cm.binary
cmap.set_bad('w', 1.)
# Use the image display function imshow() to plot the result
fig, ax = plt.subplots(figsize=(5, 3.75))
H[H == 0] = 1 # prevent warnings in log10
ax.imshow(np.log10(H).T, origin='lower',
extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]],
cmap=cmap, interpolation='nearest',
aspect='auto')
ax.set_xlabel(r'${\rm g - r}$')
ax.set_ylabel(r'${\rm r - i}$')
ax.set_xlim(-0.6, 2.5)
ax.set_ylim(-0.6, 2.5)
plt.show()
| bsd-2-clause |
meteotest/hurray | logo.py | 1 | 2775 | """
Create hurray logo
"""
import numpy as np
from scipy.ndimage import zoom
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def create_logo():
data = np.array([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
], dtype=np.float64)
rand = 1 + (np.random.random(data.shape) - 0.5) * 0.1
data = data * rand
data = zoom(data, zoom=1.6, order=5, mode="constant")
cmap = "YlGnBu"
# big logo
width, height = 300, 110
DPI = 100
width_inch, height_inch = width / DPI, height / DPI
fig = Figure(frameon=False)
fig.set_size_inches((width_inch, height_inch))
FigureCanvas(fig)
ax = fig.add_axes([0., 0., 1., 1.])
ax.set_axis_off()
ax.pcolor(data[::-1], edgecolors='#777777', linewidths=0.6)
# extent = (ax.get_window_extent().
# transformed(fig.dpi_scale_trans.inverted()))
extent = mpl.transforms.Bbox(((0, 0), (width_inch, height_inch)))
fig.savefig("logo.png", dpi=DPI, transparent=True, bbox_inches=extent,
format="png")
fig.clf() # important to release memory!
# create a small logo consisting of only the "h"
width, height = 95, 150
width_inch, height_inch = width / DPI, height / DPI
fig = Figure(frameon=False)
fig.set_size_inches((width_inch, height_inch))
FigureCanvas(fig)
ax = fig.add_axes([0., 0., 1., 1.])
ax.set_axis_off()
ax.pcolor(data[::-1][1:, 1:9], edgecolors='#888888', linewidths=0.4)
extent = mpl.transforms.Bbox(((0, 0), (width_inch, height_inch)))
fig.savefig("logo_small.png", dpi=DPI, transparent=True, bbox_inches=extent,
format="png")
fig.clf() # important to release memory!
if __name__ == "__main__":
create_logo()
| bsd-3-clause |
Rambatino/Kruskals | tests/test_kruskals.py | 1 | 7953 | """
Testing module for Kruskals
"""
from setup_tests import Kruskals
import numpy as np
import pandas as pd
import pytest
def test_driver_score():
""" Test driver_score is calculated correctly """
ndarr = np.array([
[1, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6])
exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])
driver_score = np.round(Kruskals.Kruskals(ndarr, arr).driver_score(), decimals=5)
assert np.array_equal(driver_score, exp_driver_score)
def test_from_pandas_df():
""" Test from pandas_df correctly slices the data """
ndarr = np.array([
[1, 2, 3, 4, 5, 6, 1],
[6, 5, 4, 3, 8, 1, 2],
[1, 1, 9, 1, 1, 1, 3],
[9, 2, 2, 2, 2, 2, 4],
[3, 3, 3, 9, 3, 3, 5],
[1, 2, 2, 9, 1, 4, 6]
])
exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])
df = pd.DataFrame(ndarr)
driver_score = np.round(Kruskals.Kruskals.from_pandas_df(df, list(range(6)), 6).driver_score(), decimals=5)
assert np.array_equal(driver_score, exp_driver_score)
def test_percentage():
""" Test percentage is calculated correctly """
ndarr = np.array([
[1, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6])
exp_driver_score = np.array([ 5.90856, 17.81959, 9.62429, 25.08222, 28.85722, 12.70813])
driver_score = np.round(Kruskals.Kruskals(ndarr, arr).percentage(), decimals=5)
assert np.array_equal(driver_score, exp_driver_score)
def test_series_output():
""" Test percentage is calculated correctly """
ndarr = np.array([
[1, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6])
exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])
series = Kruskals.Kruskals(ndarr, arr).driver_score_to_series()
assert np.array_equal(np.round(series.values, decimals=5), exp_driver_score)
assert series.name == 'score'
assert series.index.name == 'driver'
def test_ivars_sub_into_series():
"""
Test that the column names are correctly mapped
to the index values of the series
"""
ndarr = np.array([
[1, 2, 3, 4, 5, 6, 1],
[6, 5, 4, 3, 8, 1, 2],
[1, 1, 9, 1, 1, 1, 3],
[9, 2, 2, 2, 2, 2, 4],
[3, 3, 3, 9, 3, 3, 5],
[1, 2, 2, 9, 1, 4, 6]
])
df = pd.DataFrame(ndarr)
df.columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
ind_cols = ['a', 'b', 'c', 'd', 'e', 'f']
series = Kruskals.Kruskals.from_pandas_df(df, ind_cols, 'g').driver_score_to_series()
assert (series.index.values == ind_cols).all()
def test_that_direction_is_applied_on_directional_drivers_analysis():
""" Test whether some driver scores are negative """
ndarr = np.array([
[10, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
series = Kruskals.Kruskals(ndarr, arr).driver_score_to_series(True)
assert (series.values < 0).any()
def test_ability_to_handle_all_same_type():
"""
	Test to make sure that Kruskals can handle data
	when all the values for an independent variable are constant
"""
ndarr = np.array([
[10, 0, 3, 4, 5, 6],
[6, 0, 4, 3, 5, 1],
[1, 0, 9, 1, 5, 1],
[9, 0, 2, 2, 5, 2],
[3, 0, 3, 9, 5, 3],
[1, 0, 2, 9, 5, 4],
[1, 0, 2, 9, 5, 4],
[1, 0, 2, 9, 5, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
series = Kruskals.Kruskals(ndarr, arr).driver_score()
assert series[1] == 0.0
assert series[4] == 0.0
def test_can_handle_numpy_arrays_for_col_names():
""" Test that df.columns can be passed into __init__ """
ndarr = np.array([
[1, 2, 3, 4, 5, 6, 1],
[6, 5, 4, 3, 8, 1, 2],
[1, 1, 9, 1, 1, 1, 3],
[9, 2, 2, 2, 2, 2, 4],
[3, 3, 3, 9, 3, 3, 5],
[1, 2, 2, 9, 1, 4, 6]
])
exp_driver_score = np.array([0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])
df = pd.DataFrame(ndarr)
df.columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
driver_score = Kruskals.Kruskals(ndarr, exp_driver_score, i_vars=df.columns).driver_score_to_series()
assert np.array_equal(driver_score.index.values, ['a', 'b', 'c', 'd', 'e', 'f', 'g'])
def test_return_error_if_i_vars_not_sufficient():
""" Test that error raised when columns insufficient length """
ndarr = np.array([
[1, 2, 3, 4, 5, 6, 1],
[6, 5, 4, 3, 8, 1, 2],
[1, 1, 9, 1, 1, 1, 3],
[9, 2, 2, 2, 2, 2, 4],
[3, 3, 3, 9, 3, 3, 5],
[1, 2, 2, 9, 1, 4, 6]
])
exp_driver_score = np.array([0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])
i_vars = ['a', 'b', 'c', 'd', 'e', 'f']
with pytest.raises(ValueError) as e:
Kruskals.Kruskals(ndarr, exp_driver_score, i_vars=i_vars).driver_score_to_series()
assert 'driver labels: {}, not sufficient for ndarray of shape {}'.format(i_vars, ndarr.shape) in str(e.value)
def test_percentage_when_non_directional():
""" Test the percentage function behaves as expected """
ndarr = np.array([
[10, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
percentage = Kruskals.Kruskals(ndarr, arr).driver_score(percentage=True)
assert (np.round(percentage, decimals=4) == [18.7523, 13.8413, 15.4078, 21.5111, 23.4954, 6.9921]).all()
def test_percentage_when_directional():
""" Test the percentage function behaves as expected """
ndarr = np.array([
[10, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)
assert (np.round(percentage, decimals=4) == [-18.7523, -13.8413, -15.4078, 21.5111, -23.4954, 6.9921]).all()
def test_dependent_variable_can_be_nan():
ndarr = np.array([
[10, 2, 3, 4, 5, 6],
[6, 5, 4, 3, 8, 1],
[1, 1, 9, 1, 1, 1],
[9, 2, 2, 2, 2, 2],
[3, 3, 3, 9, 3, 3],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4],
[1, 2, 2, 9, 1, 4]
])
arr = np.array([1, 2, 3, 4, np.nan, 6, 7, 8])
percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)
assert (np.round(percentage, decimals=4) == [-17.2805, -13.5913, -14.5028, 23.0658, -22.5377, 9.0218]).all()
def test_independent_1_col():
ndarr = np.array([
[10],
[6],
[1],
[9],
[3],
[1],
[1],
[1],
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)
assert (np.isnan(np.round(percentage, decimals=4))).all()
def test_independent_2_col():
ndarr = np.array([
[10, 2],
[6, 5],
[1, 1],
[9, 2],
[3, 3],
[1, 2],
[1, 2],
[1, 2]
])
arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])
percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)
assert (np.isnan(np.round(percentage, decimals=4))).all()
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 94 | 3083 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows but it is not
# clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
# run_breakpad_browser_process_crash_test is flaky.
# See http://crbug.com/317890
tests_to_disable.append('run_breakpad_browser_process_crash_test')
# See http://crbug.com/332301
tests_to_disable.append('run_breakpad_crash_in_syscall_test')
# It appears that crash_service.exe is not being reliably built by
# default in the CQ. See: http://crbug.com/380880
tests_to_disable.append('run_breakpad_untrusted_crash_test')
tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
dpinney/omf | omf/scratch/GRIP/grip.py | 1 | 37464 | ''' Web server exposing HTTP API for GRIP. '''
import os, traceback, tempfile, platform, zipfile, subprocess, time, shutil, sys, datetime, numbers
from functools import wraps
from multiprocessing import Process
import matplotlib.pyplot as plt
from flask import Flask, request, send_from_directory, make_response, json, abort, redirect, url_for, jsonify
import omf
from omf import distNetViz, feeder, milToGridlab, network
from omf import cymeToGridlab as cymeToGridlab_
from omf.models import transmission, resilientDist
from omf.solvers import gridlabd, nrelsam2013
from omf.scratch.GRIP import grip_config
app = Flask(__name__)
app.config.from_object(grip_config.Config)
# Change the dictionary values to change the output file names. Don't change the keys unless you also update the rest of the dictionary references in
# the code
filenames = {
'ongl': 'onelinegridlab.png',
'msgl': 'milsofttogridlab.glm',
'cygl': 'cymetogridlab.glm',
'glrun': 'gridlabrun.json',
'glgfm': 'gridlabtogfm.gfm',
'rungfm': 'rungfm.txt',
'samrun': 'samrun.json',
'tmomt': 'transmissionmattoomt.json',
'tmpf': 'transmissionpowerflow.zip',
'tv': 'network-viewer.html',
'dv': 'distnetviz-viewer.html',
'gfl': 'glmforcelayout.glm'
}
def _get_abs_path(path):
'''Return the absolute variant of the path argument.'''
if not os.path.isabs(path):
path = "/" + path
return path
def _get_rel_path(path):
'''Return the relative variant of the path argument.'''
return path.lstrip("/")
def _get_elapsed_time(start, end):
	'''Return the elapsed time between two Unix timestamps as an HH:MM:SS string.'''
elapsed_time = int(end - start)
return '{:02}:{:02}:{:02}'.format(elapsed_time // 3600, ((elapsed_time % 3600) // 60), elapsed_time % 60)
def _get_created_at(temp_dir):
	'''Return the Unix time at which the job's start-time.txt marker file was created.'''
start_time_path = os.path.join(temp_dir, 'start-time.txt')
created_at = os.path.getmtime(start_time_path)
return created_at
def _get_timestamp(unix_time):
'''Return an ISO 8601 timestamp string that is equivalent to a Unix time integer'''
return datetime.datetime.utcfromtimestamp(unix_time).isoformat().rsplit('.')[0] + 'Z'
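# Illustrative values for the two time helpers above (kept as comments):
#
#     _get_elapsed_time(0, 3661)   # '01:01:01'
#     _get_timestamp(0)            # '1970-01-01T00:00:00Z'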
def _get_failure_time(temp_dir):
	'''Return (stop time, error message) if the job failed, otherwise (None, None).'''
error_path = os.path.join(temp_dir, 'error.txt')
if os.path.isfile(error_path):
stopped_at = os.path.getmtime(error_path)
with open(error_path, 'r') as f:
msg = f.readlines()
if app.config["DELETE_FAILED_JOBS"]:
shutil.rmtree(temp_dir)
return (stopped_at, msg)
return (None, None)
def _validate_input(input_metadata):
'''
Validate the incoming request input, given the input_metadata dictionary that specifies:
- The name of the form parameter
- The Python type of the form parameter
- Whether or not the form parameter is required
- The permitted range of values of the form parameter
'''
name = input_metadata['name']
input_type = input_metadata['type']
input_ = request.files.get(name) if input_type == 'file' else request.form.get(name)
if input_ is None or (input_type == 'file' and input_.filename == ''):
if input_metadata['required']:
return ({ name: None }, "The parameter '{}' of type '{}' is required, but it was not submitted.".format(name, input_type))
return (None, None)
elif input_type != 'file':
try:
if input_type is bool:
if input_ != 'True' and input_ != 'False':
raise Exception
else:
input_ = input_type(input_)
except:
return ({name: input_}, "The parameter '{}' could not be converted into the required type '{}'.".format(name, input_type))
input_range = input_metadata.get('range')
if input_range is not None:
if input_type == str:
if input_ not in input_range:
return ({name: input_}, "The parameter '{}' was not one of the allowed values: '{}'.".format(name, input_range))
elif issubclass(input_type, numbers.Number):
min_ = input_range.get('min')
if min_ is not None and input_ < min_:
return ({name: input_}, "The parameter '{}' was less than the minimum bound of '{}'.".format(name, min_))
max_ = input_range.get('max')
if max_ is not None and input_ > max_:
return ({name: input_}, "The parameter '{}' was greater than the maximum bound of '{}'.".format(name, max_))
return (None, None)
def start_process(route_function=None, inputs_metadata=None, custom_validation_functions=None):
	'''Decorator factory that validates the request inputs and, if they are valid, starts the wrapped function as a background job.'''
def decorator(process_function):
@wraps(process_function)
def wrapper(*args, **kwargs):
'''If the inputs were valid, start the process and return 202 and JSON, else return 4xx and JSON.'''
errors = []
if inputs_metadata:
for i in inputs_metadata:
src, msg = _validate_input(i)
if msg:
errors.append({
'http code': 400,
'source': src,
'title': 'Invalid Parameter Value',
'detail': msg
})
if len(errors) > 0:
r = jsonify(job={'state': 'failed'}, errors=errors)
r.status = '400'
return r
temp_dir = tempfile.mkdtemp()
if custom_validation_functions:
for func in custom_validation_functions:
src, msg = func(temp_dir)
if msg:
errors.append({
'http code': 422,
'source': src,
'title': 'Invalid Parameter Value Combination',
'detail': msg
})
if len(errors) > 0:
r = jsonify(job={'state': 'failed'}, errors=errors)
r.status = '422'
return r
start_time_path = os.path.join(temp_dir, 'start-time.txt')
with open(start_time_path, 'w'):
pass
created_at = _get_created_at(temp_dir)
url = process_function(temp_dir)
r = jsonify(job={
'state': 'in-progress',
'status': os.path.join(request.url_root, _get_rel_path(url)),
'created at': _get_timestamp(created_at),
'elapsed time': '00:00:00',
})
r.status = '202'
return r
return wrapper
if route_function is None:
return decorator
else:
return decorator(route_function)
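# Usage sketch (hypothetical route) of the decorator above: a *_start view
# forks a worker process and hands back its status URL, which start_process
# turns into the 202 JSON response.
#
#   @app.route('/myJob', methods=['POST'])
#   @start_process(inputs_metadata=({'name': 'glm', 'required': True, 'type': 'file'},))
#   def myJob_start(temp_dir):
#       Process(target=myJob, args=(temp_dir,)).start()
#       return url_for('myJob_status', temp_dir=_get_rel_path(temp_dir))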
def try_except(func):
@wraps(func)
def wrapper(*args, **kwargs):
'''Run the wrapped function in a try-except, writing any traceback to error.txt in the job directory.'''
temp_dir = args[0]
try:
func(temp_dir)
except:
entries = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
with open(os.path.join(temp_dir, 'error.txt'), 'w') as f:
f.write(entries[0])  # the 'Traceback (most recent call last):' header
f.write(entries[-2])
f.write(entries[-1])
return wrapper
def get_status(func):
@wraps(func)
def wrapper(*args, **kwargs):
'''Return JSON indicating the status of the background job if it exists, else 404.'''
temp_dir = _get_abs_path(kwargs["temp_dir"])
if not os.path.isdir(temp_dir):
abort(404)
created_at = _get_created_at(temp_dir)
failed_at, failure_msg = _get_failure_time(temp_dir)
completed_at, status_url, download_url = func(temp_dir)
status_url = os.path.join(request.url_root, _get_rel_path(status_url))
download_url = os.path.join(request.url_root, _get_rel_path(download_url))
response_data = {'job': {'created at': _get_timestamp(created_at)}}
if failed_at:
state = 'failed'
elapsed_time = _get_elapsed_time(created_at, failed_at)
response_data['job']['stopped at'] = _get_timestamp(failed_at)
response_data['errors'] = [{
'http code': 500,
'source': failure_msg,
'title': 'Job Failed',
'detail': 'The process handling the job raised an exception.'
}]
elif completed_at:
state = 'complete'
elapsed_time = _get_elapsed_time(created_at, completed_at)
response_data['job']['stopped at'] = _get_timestamp(completed_at)
response_data['job']['download'] = download_url
else:
state = 'in-progress'
elapsed_time = _get_elapsed_time(created_at, time.time())
response_data['job']['elapsed time'] = elapsed_time
if request.method == 'DELETE':
state = 'deleted'
if response_data['job'].get('stopped at') is None:
response_data['job']['stopped at'] = _get_timestamp(time.time())
response_data['job'].pop('download', None)  # only present when the job completed
shutil.rmtree(temp_dir)
elif request.method == 'GET':
response_data['job']['status'] = status_url
response_data['job']['state'] = state
r = jsonify(response_data)
if failed_at:
r.status = '500'
return r
return wrapper
def get_download(func):
@wraps(func)
def wrapper(*args, **kwargs):
'''Return the requested resource if it exists, else 404.'''
temp_dir = _get_abs_path(kwargs["temp_dir"])
response = func(temp_dir)
if app.config["DELETE_SUCCESSFUL_JOBS"]:
shutil.rmtree(temp_dir)
return response
return wrapper
def _validate_oneLineGridlab(temp_dir):
'''Save the submitted GLM and check that 'useLatLons' is consistent with the coordinates (or lack of them) in the file.'''
glm_path = os.path.join(temp_dir, 'in.glm')
request.files['glm'].save(glm_path)
tree = feeder.parse(glm_path)
if not distNetViz.contains_valid_coordinates(tree) and request.form['useLatLons'] == 'True':
return (
{'useLatLons': 'True'},
("Since the submitted GLM contained no coordinates, or the coordinates could not be parsed as floats, "
"'useLatLons' must be 'False' because artificial coordinates must be used to draw the GLM.")
)
return (None, None)
@app.route("/oneLineGridlab", methods=["POST"])
@start_process(
inputs_metadata=(
{'name': 'useLatLons', 'required': True, 'type': bool},
{'name': 'glm', 'required': True, 'type': 'file'}
),
custom_validation_functions=(_validate_oneLineGridlab,)
)
def oneLineGridlab_start(temp_dir):
p = Process(target=oneLineGridlab, args=(temp_dir,))
p.start()
return url_for('oneLineGridlab_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def oneLineGridlab(temp_dir):
'''
Create a one-line diagram of the input GLM and return a PNG of it.
Form parameters:
:param glm: a GLM file.
:param useLatLons: 'True' if the diagram should be drawn with coordinate values taken from within the GLM, 'False' if the diagram should be drawn
with artificial coordinates using Graphviz NEATO.
Details:
:OMF function: omf.feeder.latLonNxGraph().
:run-time: about 1 to 30 seconds.
'''
glm_path = os.path.join(temp_dir, 'in.glm')
feed = feeder.parse(glm_path)
graph = feeder.treeToNxGraph(feed)
neatoLayout = True if request.form.get('useLatLons') == 'False' else False
# Clear old plots.
plt.clf()
plt.close()
# Plot new plot.
feeder.latLonNxGraph(graph, labels=False, neatoLayout=neatoLayout, showPlot=False)
plt.savefig(os.path.join(temp_dir, filenames["ongl"]))
@app.route("/oneLineGridlab/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def oneLineGridlab_status(temp_dir):
ongl_path = os.path.join(temp_dir, filenames['ongl'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('oneLineGridlab_status', temp_dir=temp_dir)
download_url = url_for("oneLineGridlab_download", temp_dir=temp_dir)
if os.path.isfile(ongl_path):
return (os.path.getmtime(ongl_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/oneLineGridlab/<path:temp_dir>/download")
@get_download
def oneLineGridlab_download(temp_dir):
return send_from_directory(temp_dir, filenames["ongl"])
@app.route('/milsoftToGridlab', methods=['POST'])
@start_process(
inputs_metadata=(
{'name': 'std', 'required': True, 'type': 'file'},
{'name': 'seq', 'required': True, 'type': 'file'}
)
)
def milsoftToGridlab_start(temp_dir):
p = Process(target=milsoftToGridlab, args=(temp_dir,))
p.start()
return url_for('milsoftToGridlab_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def milsoftToGridlab(temp_dir):
'''
Convert a Milsoft Windmil ASCII export (.std & .seq) into a GridLAB-D .glm and return the .glm.
Form parameters:
:param std: an STD file.
:param seq: an SEQ file.
Details:
:OMF function: omf.milToGridlab.convert().
:run-time: up to a few minutes
'''
stdPath = os.path.join(temp_dir, 'in.std')
request.files['std'].save(stdPath)
seqPath = os.path.join(temp_dir, 'in.seq')
request.files['seq'].save(seqPath)
with open(stdPath) as f:
stdFile = f.read()
with open(seqPath) as f:
seqFile = f.read()
tree = milToGridlab.convert(stdFile, seqFile, rescale=True)
# Remove '#include "schedules.glm"' objects from the tree. This would be faster if it were incorporated into sortedWrite() or similar.
tree = {k: v for k, v in tree.items() if v.get('omftype') != '#include'}
with open(os.path.join(temp_dir, filenames['msgl']), 'w') as outFile:
outFile.write(feeder.sortedWrite(tree))
@app.route('/milsoftToGridlab/<path:temp_dir>', methods=['GET', 'DELETE'])
@get_status
def milsoftToGridlab_status(temp_dir):
msgl_path = os.path.join(temp_dir, filenames['msgl'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('milsoftToGridlab_status', temp_dir=temp_dir)
download_url = url_for('milsoftToGridlab_download', temp_dir=temp_dir)
if os.path.isfile(msgl_path):
return (os.path.getmtime(msgl_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/milsoftToGridlab/<path:temp_dir>/download")
@get_download
def milsoftToGridlab_download(temp_dir):
return send_from_directory(temp_dir, filenames["msgl"], mimetype="text/plain")
@app.route("/cymeToGridlab", methods=["POST"])
@start_process(inputs_metadata=({'name': 'mdb', 'required': True, 'type': 'file'},))
def cymeToGridlab_start(temp_dir):
p = Process(target=cymeToGridlab, args=(temp_dir,))
p.start()
return url_for('cymeToGridlab_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def cymeToGridlab(temp_dir):
'''
Convert an Eaton Cymdist .mdb export into a GridLAB-D .glm and return the .glm.
Form parameters:
:param mdb: a MDB file.
Details:
:OMF function: omf.cymeToGridlab.convertCymeModel().
:run-time: up to a few minutes.
'''
mdbPath = os.path.join(temp_dir, "in.mdb")
request.files["mdb"].save(mdbPath)
import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
tree = cymeToGridlab_.convertCymeModel(mdbPath, temp_dir)
# Remove '#include "schedules.glm"' objects from the tree. This would be faster if it were incorporated into sortedWrite() or similar.
tree = {k: v for k, v in tree.items() if v.get('omftype') != '#include'}
with open(os.path.join(temp_dir, filenames["cygl"]), 'w') as outFile:
outFile.write(feeder.sortedWrite(tree))
@app.route("/cymeToGridlab/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def cymeToGridlab_status(temp_dir):
cygl_path = os.path.join(temp_dir, filenames['cygl'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('cymeToGridlab_status', temp_dir=temp_dir)
download_url = url_for("cymeToGridlab_download", temp_dir=temp_dir)
if os.path.isfile(cygl_path):
return (os.path.getmtime(cygl_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/cymeToGridlab/<path:temp_dir>/download")
@get_download
def cymeToGridlab_download(temp_dir):
return send_from_directory(temp_dir, filenames["cygl"], mimetype="text/plain")
@app.route("/gridlabRun", methods=["POST"])
@start_process(inputs_metadata=({'name': 'glm', 'required': True, 'type': 'file'},))
def gridlabRun_start(temp_dir):
p = Process(target=gridlabRun, args=(temp_dir,))
p.start()
return url_for('gridlabRun_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def gridlabRun(temp_dir):
'''
Run a .glm through GridLAB-D and return the results as JSON.
Form parameters:
:param glm: a GLM file.
Details:
:OMF function: omf.solvers.gridlabd.runInFilesystem().
:run-time: up to a few hours.
TODO: think about attachment support.
'''
fName = 'in.glm'
f = request.files['glm']
glmOnDisk = os.path.join(temp_dir, fName)
f.save(glmOnDisk)
feed = feeder.parse(glmOnDisk)
outDict = gridlabd.runInFilesystem(feed, attachments=[], keepFiles=True, workDir=temp_dir, glmName='out.glm')
with open(os.path.join(temp_dir, filenames["glrun"]), 'w') as f:
json.dump(outDict, f)
@app.route("/gridlabRun/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def gridlabRun_status(temp_dir):
glrun_path = os.path.join(temp_dir, filenames['glrun'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('gridlabRun_status', temp_dir=temp_dir)
download_url = url_for('gridlabRun_download', temp_dir=temp_dir)
if os.path.isfile(glrun_path):
return (os.path.getmtime(glrun_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/gridlabRun/<path:temp_dir>/download")
@get_download
def gridlabRun_download(temp_dir):
return send_from_directory(temp_dir, filenames["glrun"], mimetype="application/json")
@app.route('/gridlabdToGfm', methods=['POST'])
@start_process(
inputs_metadata=(
{'name': 'glm', 'required': True, 'type': 'file'},
{'name': 'phase_variation', 'required': False, 'type': float, 'range': {'min': 0, 'max': 1}},
{'name': 'chance_constraint', 'required': False, 'type': float, 'range': {'min': 0, 'max': 1}},
{'name': 'critical_load_met', 'required': False, 'type': float, 'range': {'min': 0, 'max': 1}},
{'name': 'total_load_met', 'required': False, 'type': float, 'range': {'min': 0, 'max': 1}},
{'name': 'maxDGPerGenerator', 'required': False, 'type': float},
{'name': 'dgUnitCost', 'required': False, 'type': float},
{'name': 'generatorCandidates', 'required': False, 'type': str},
{'name': 'criticalLoads', 'required': False, 'type': str},
)
)
def gridlabdToGfm_start(temp_dir):
p = Process(target=gridlabdToGfm, args=(temp_dir,))
p.start()
return url_for("gridlabdToGfm_status", temp_dir=_get_rel_path(temp_dir))
@try_except
def gridlabdToGfm(temp_dir):
'''
Convert a GridLAB-D model (i.e. .glm file) into a LANL ANSI General Fragility Model and return the GFM model as JSON. Note that this is not the
main fragility model for GRIP.
Form parameters:
:param glm: a GLM file.
:param phase_variation: maximum phase unbalance allowed in the optimization model.
:param chance_constraint: indicates the percent of damage scenarios where load constraints above must be met.
:param critical_load_met: indicates the percent of critical load that must be met in each damage scenario.
:param total_load_met: indicates the percent of non-critical load that must be met in each damage scenario.
:param maxDGPerGenerator: the maximum DG capacity that a generator supports in MW.
:param dgUnitCost: the cost of adding distributed generation to a load in $/MW.
:param generatorCandidates: the IDs of nodes on the system where the user wants to consider adding distributed generation. At least one node is
required.
:type generatorCandidates: one long string delimited with commas.
:param criticalLoads: the IDs of loads on the system that the user declares to be critical (must-run).
:type criticalLoads: one long string delimited with commas.
Details:
:OMF function: omf.models.resilientDist.convertToGFM().
:run-time: a few seconds.
'''
fName = 'in.glm'
f = request.files['glm']
glmPath = os.path.join(temp_dir, fName)
f.save(glmPath)
gfmInputTemplate = {
'phase_variation': float(request.form.get('phase_variation', 0.15)),
'chance_constraint': float(request.form.get('chance_constraint', 1)),
'critical_load_met': float(request.form.get('critical_load_met', .98)),
'total_load_met': float(request.form.get('total_load_met', .9)),
'maxDGPerGenerator': float(request.form.get('maxDGPerGenerator', 1)),
'dgUnitCost': float(request.form.get('dgUnitCost', 1000000)),
'generatorCandidates': request.form.get('generatorCandidates', ''),
'criticalLoads': request.form.get('criticalLoads', '')
}
feederModel = {
'nodes': [], # Don't need these.
'tree': feeder.parse(glmPath)
}
gfmDict = resilientDist.convertToGFM(gfmInputTemplate, feederModel)
with open(os.path.join(temp_dir, filenames["glgfm"]), 'w') as f:
json.dump(gfmDict, f)
@app.route("/gridlabdToGfm/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def gridlabdToGfm_status(temp_dir):
glgfm_path = os.path.join(temp_dir, filenames['glgfm'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('gridlabdToGfm_status', temp_dir=temp_dir)
download_url = url_for('gridlabdToGfm_download', temp_dir=temp_dir)
if os.path.isfile(glgfm_path):
return (os.path.getmtime(glgfm_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/gridlabdToGfm/<path:temp_dir>/download")
@get_download
def gridlabdToGfm_download(temp_dir):
return send_from_directory(temp_dir, filenames["glgfm"], mimetype="application/json")
@app.route('/runGfm', methods=['POST'])
@start_process(
inputs_metadata=(
{'name': 'gfm', 'required': True, 'type': 'file'},
{'name': 'asc', 'required': True, 'type': 'file'}
)
)
def runGfm_start(temp_dir):
p = Process(target=runGfm, args=(temp_dir,))
p.start()
return url_for('runGfm_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def runGfm(temp_dir):
'''
Calculate distribution damage using a LANL ANSI General Fragility Model file (i.e. a .gfm) along with a hazard field file (i.e. a .asc file) and
return the results as JSON. Note that this is not the main fragility model for GRIP.
Form parameters:
:param gfm: a GFM file.
:param asc: an ASC file.
Details:
:OMF function: omf.solvers.gfm.run().
:run-time: should be around 1 to 30 seconds.
'''
gfm_name = "gfm.json"
gfm_path = os.path.join(temp_dir, gfm_name)
request.files["gfm"].save(gfm_path)
hazard_name = "hazard.asc"
hazard_path = os.path.join(temp_dir, hazard_name)
request.files["asc"].save(hazard_path)
# Run GFM
gfmBinaryPath = os.path.join(omf.omfDir, "solvers/gfm/Fragility.jar")
if platform.system() == 'Darwin':
#HACK: force use of Java8 on MacOS.
#javaCmd = '/Library/Java/JavaVirtualMachines/jdk1.8.0_181.jdk/Contents/Home/bin/java'
#HACK HACK: use my version of Java 8 for now
javaCmd = "/Library/Java/JavaVirtualMachines/jdk1.8.0_202.jdk/Contents/Home/bin/java"
else:
javaCmd = 'java'
outName = 'gfm_out.json'
proc = subprocess.Popen(
[javaCmd,'-jar', gfmBinaryPath, '-r', gfm_name, '-wf', hazard_name, '-num', '3', '-ro', outName],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
cwd = temp_dir
)
(stdout,stderr) = proc.communicate()
gfmOutPath = os.path.join(temp_dir, outName)
try:
with open(gfmOutPath) as f:
out = json.dumps(json.load(f))  # json.load() returns a dict; re-serialize it to text
except:
out = stdout.decode()
with open(os.path.join(temp_dir, filenames["rungfm"]), 'w') as f:
f.write(out)
@app.route("/runGfm/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def runGfm_status(temp_dir):
rungfm_path = os.path.join(temp_dir, filenames['rungfm'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('runGfm_status', temp_dir=temp_dir)
download_url = url_for('runGfm_download', temp_dir=temp_dir)
if os.path.isfile(rungfm_path):
return (os.path.getmtime(rungfm_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/runGfm/<path:temp_dir>/download")
@get_download
def runGfm_download(temp_dir):
return send_from_directory(temp_dir, filenames["rungfm"])
@app.route('/samRun', methods=['POST'])
@start_process(
inputs_metadata=(
{'name': 'tmy2', 'required': True, 'type': 'file'},
{'name': 'system_size', 'required': False, 'type': int},
{'name': 'derate', 'required': False, 'type': float, 'range': {'min': 0, 'max': 1}},
{'name': 'track_mode', 'required': False, 'type': int, 'range': {'min': 0, 'max': 3}},
{'name': 'azimuth', 'required': False, 'type': float, 'range': {'min': 0, 'max': 360}},
{'name': 'tilt', 'required': False, 'type': float, 'range': {'min': 0, 'max': 90}},
)
)
def samRun_start(temp_dir):
p = Process(target=samRun, args=(temp_dir,))
p.start()
return url_for("samRun_status", temp_dir=_get_rel_path(temp_dir))
@try_except
def samRun(temp_dir):
'''
Run NREL's System Advisor Model with the specified parameters and return output vectors and floats in JSON.
Form parameters:
:param tmy2: a tmy2 file.
:param system_size: nameplate capacity.
:param derate: system derate value.
:param track_mode: tracking mode.
:param azimuth: azimuth angle.
:param tilt: tilt angle.
There are roughly 40 additional optional input parameters to the pvwattsv1 module of SAM.
Details:
:OMF function: omf.solvers.sam.run().
:run-time: should only be a couple of seconds.
See pvwattsv1-variable-info.txt for details on the other 40 possible inputs to this route.
'''
tmy2_path = os.path.join(temp_dir, "in.tmy2")
request.files["tmy2"].save(tmy2_path)
# Set up SAM data structures.
ssc = nrelsam2013.SSCAPI()
dat = ssc.ssc_data_create()
# Set the inputs.
ssc.ssc_data_set_string(dat, b'file_name', bytes(tmy2_path, 'ascii'))
for key in request.form.keys():
ssc.ssc_data_set_number(dat, bytes(key, 'ascii'), float(request.form.get(key)))
# Enter required parameters
system_size = int(request.form.get('system_size', 4))
ssc.ssc_data_set_number(dat, b'system_size', system_size)
derate = float(request.form.get('derate', .77))
ssc.ssc_data_set_number(dat, b'derate', derate)
track_mode = int(request.form.get('track_mode', 0))
ssc.ssc_data_set_number(dat, b'track_mode', track_mode)
azimuth = float(request.form.get('azimuth', 180))
ssc.ssc_data_set_number(dat, b'azimuth', azimuth)
tilt = float(request.form.get('tilt', 30))
ssc.ssc_data_set_number(dat, b'tilt', tilt)
# Run PV system simulation.
mod = ssc.ssc_module_create(b'pvwattsv1')
ssc.ssc_module_exec(mod, dat)
# Geodata output.
outData = {}
outData['city'] = ssc.ssc_data_get_string(dat, b'city').decode()
outData['state'] = ssc.ssc_data_get_string(dat, b'state').decode()
outData['lat'] = ssc.ssc_data_get_number(dat, b'lat')
outData['lon'] = ssc.ssc_data_get_number(dat, b'lon')
outData['elev'] = ssc.ssc_data_get_number(dat, b'elev')
# Weather output.
outData['climate'] = {}
outData['climate']['Plane of Array Irradiance (W/m^2)'] = ssc.ssc_data_get_array(dat, b'poa')
outData['climate']['Beam Normal Irradiance (W/m^2)'] = ssc.ssc_data_get_array(dat, b'dn')
outData['climate']['Diffuse Irradiance (W/m^2)'] = ssc.ssc_data_get_array(dat, b'df')
outData['climate']['Ambient Temperature (F)'] = ssc.ssc_data_get_array(dat, b'tamb')
outData['climate']['Cell Temperature (F)'] = ssc.ssc_data_get_array(dat, b'tcell')
outData['climate']['Wind Speed (m/s)'] = ssc.ssc_data_get_array(dat, b'wspd')
# Power generation.
outData['Consumption'] = {}
outData['Consumption']['Power'] = ssc.ssc_data_get_array(dat, b'ac')
outData['Consumption']['Losses'] = ssc.ssc_data_get_array(dat, b'ac')
outData['Consumption']['DG'] = ssc.ssc_data_get_array(dat, b'ac')
with open(os.path.join(temp_dir, filenames['samrun']), 'w') as f:
json.dump(outData, f)
@app.route("/samRun/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def samRun_status(temp_dir):
samrun_path = os.path.join(temp_dir, filenames['samrun'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('samRun_status', temp_dir=temp_dir)
download_url = url_for('samRun_download', temp_dir=temp_dir)
if os.path.isfile(samrun_path):
return (os.path.getmtime(samrun_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/samRun/<path:temp_dir>/download")
@get_download
def samRun_download(temp_dir):
return send_from_directory(temp_dir, filenames["samrun"])
@app.route('/transmissionMatToOmt', methods=['POST'])
@start_process(inputs_metadata=({'name': 'matpower', 'required': True, 'type': 'file'},))
def transmissionMatToOmt_start(temp_dir):
p = Process(target=transmissionMatToOmt, args=(temp_dir,))
p.start()
return url_for('transmissionMatToOmt_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def transmissionMatToOmt(temp_dir):
'''
Convert a MATPOWER .mat or .m input into a JSON .omt transmission circuit format and return the .omt.
Form parameters:
:param matpower: a MATPOWER .mat file.
Details:
:OMF function: omf.network.parse()
:run-time: maybe a couple minutes.
'''
mat_path = os.path.join(temp_dir, "input.mat")
request.files["matpower"].save(mat_path)
omt_json = network.parse(mat_path, filePath=True)
if omt_json == {"baseMVA":"100.0","mpcVersion":"2.0","bus":{},"gen":{}, "branch":{}}:
raise Exception("The submitted .m file was invalid or could not be parsed correctly.")
nxG = network.netToNxGraph(omt_json)
omt_json = network.latlonToNet(nxG, omt_json)
with open(os.path.join(temp_dir, filenames["tmomt"]), 'w') as f:
json.dump(omt_json, f)
@app.route("/transmissionMatToOmt/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def transmissionMatToOmt_status(temp_dir):
tmomt_path = os.path.join(temp_dir, filenames['tmomt'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('transmissionMatToOmt_status', temp_dir=temp_dir)
download_url = url_for('transmissionMatToOmt_download', temp_dir=temp_dir)
if os.path.isfile(tmomt_path):
return (os.path.getmtime(tmomt_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/transmissionMatToOmt/<path:temp_dir>/download")
@get_download
def transmissionMatToOmt_download(temp_dir):
return send_from_directory(temp_dir, filenames["tmomt"])
@app.route('/transmissionPowerflow', methods=['POST'])
@start_process(
inputs_metadata=(
{'name': 'omt', 'required': True, 'type': 'file'},
{'name': 'algorithm', 'required': False, 'type': str, 'range': ('NR', 'FDXB', 'FDBX', 'GS')},
{'name': 'model', 'required': False, 'type': str, 'range': ('AC', 'DC')},
{'name': 'iteration', 'required': False, 'type': int, 'range': {'min': 1}},
{'name': 'tolerance', 'required': False, 'type': float},
{'name': 'genLimits', 'required': False, 'type': int, 'range': {'min': 0, 'max': 2}}
)
)
def transmissionPowerflow_start(temp_dir):
p = Process(target=transmissionPowerflow, args=(temp_dir,))
p.start()
return url_for("transmissionPowerflow_status", temp_dir=_get_rel_path(temp_dir))
@try_except
def transmissionPowerflow(temp_dir):
'''
Run ACOPF for a .omt transmission circuit.
Form parameters:
:param omt: an OMT file.
:param algorithm: powerflow solution method. 'NR' = Newton's method, 'FDXB' = Fast-Decoupled (XB version), 'FDBX' = Fast-Decoupled (BX version),
'GS' = Gauss-Seidel.
:param model: AC vs. DC modeling for power flow and OPF formulation.
:param iteration: maximum number of iterations allowed in the attempt to find a powerflow solution.
:param tolerance: termination tolerance on per unit P and Q dispatch.
:param genLimits: enforce gen reactive power limits at expense of Vm. 0 = do not enforce limits, 1 = enforce limits, simultaneous bus type
conversion, 2 = enforce limits, one-at-a-time bus type conversion.
Details:
:OMF function: omf.models.transmission.new() and omf.models.transmission.work().
:run-time: tens of seconds.
'''
algorithm = request.form.get('algorithm', 'NR')
model = request.form.get('model', 'AC')
iteration = int(request.form.get('iteration', 10))
tolerance = float(request.form.get('tolerance', 10**-8))
genLimits = int(request.form.get('genLimits', 0))
inputDict = {
'algorithm': algorithm,
'model': model,
'tolerance': tolerance,
'iteration': iteration,
'genLimits': genLimits
}
model_dir = os.path.join(temp_dir, "transmission")
if transmission.new(model_dir):
omt_path = os.path.join(model_dir, "case9.omt")
request.files["omt"].save(omt_path)
with open(os.path.join(model_dir, "allInputData.json")) as f:
defaults = json.load(f)
merged = {key: inputDict.get(key) if inputDict.get(key) is not None else defaults[key] for key in defaults}
with open(os.path.join(model_dir, "allInputData.json"), 'w') as f:
json.dump(merged, f)
outputDict = transmission.work(model_dir, merged)
with open(os.path.join(model_dir, "allOutputData.json"), 'w') as f:
json.dump(outputDict, f)
with zipfile.ZipFile(os.path.join(model_dir, filenames["tmpf"]), 'w', zipfile.ZIP_DEFLATED) as z:
z.write(os.path.join(model_dir, "output.png"), "output.png")
z.write(os.path.join(model_dir, "allOutputData.json"), "allOutputData.json")
else:
raise Exception("Couldn't create model directory")
@app.route("/transmissionPowerflow/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def transmissionPowerflow_status(temp_dir):
tmpf_path = os.path.join(temp_dir, 'transmission', filenames['tmpf'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('transmissionPowerflow_status', temp_dir=temp_dir)
download_url = url_for('transmissionPowerflow_download', temp_dir=temp_dir)
if os.path.isfile(tmpf_path):
return (os.path.getmtime(tmpf_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/transmissionPowerflow/<path:temp_dir>/download")
@get_download
def transmissionPowerflow_download(temp_dir):
model_dir = os.path.join(temp_dir, "transmission")
return send_from_directory(model_dir, filenames["tmpf"], as_attachment=True)
@app.route('/transmissionViz', methods=['POST'])
@start_process(inputs_metadata=({'name': 'omt', 'required': True, 'type': 'file'},))
def transmissionViz_start(temp_dir):
p = Process(target=transmissionViz, args=(temp_dir,))
p.start()
return url_for("transmissionViz_status", temp_dir=_get_rel_path(temp_dir))
@try_except
def transmissionViz(temp_dir):
'''
Generate an interactive and editable one line diagram of a transmission network and return it as an HTML file.
Form parameters:
:param omt: an .omt file.
Details:
:OMF function: omf.network.viz().
:run-time: a couple seconds.
'''
omt_path = os.path.join(temp_dir, "in.omt")
request.files["omt"].save(omt_path)
try:
with open(omt_path) as f:
json.load(f)
except:
raise Exception("Could not parse the omt file as json")
network.viz(omt_path, output_path=temp_dir, output_name=filenames["tv"], open_file=False)
@app.route("/transmissionViz/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def transmissionViz_status(temp_dir):
tv_path = os.path.join(temp_dir, filenames['tv'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('transmissionViz_status', temp_dir=temp_dir)
download_url = url_for("transmissionViz_download", temp_dir=temp_dir)
if os.path.isfile(tv_path):
return (os.path.getmtime(tv_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/transmissionViz/<path:temp_dir>/download")
@get_download
def transmissionViz_download(temp_dir):
return send_from_directory(temp_dir, filenames["tv"])
@app.route("/distributionViz", methods=["POST"])
@start_process(inputs_metadata=({'name': 'omd', 'required': True, 'type': 'file'},))
def distributionViz_start(temp_dir):
p = Process(target=distributionViz, args=(temp_dir,))
p.start()
return url_for("distributionViz_status", temp_dir=_get_rel_path(temp_dir))
@try_except
def distributionViz(temp_dir):
'''
Generate an interactive and editable one line diagram of a distribution network and return it as an HTML file.
Form parameters:
:param omd: a .omd file.
Details:
:OMF function: omf.distNetViz.viz().
:run-time: a few seconds.
'''
omd_path = os.path.join(temp_dir, "in.omd")
request.files["omd"].save(omd_path)
try:
with open(omd_path) as f:
json.load(f)
except:
raise Exception("Could not parse omd file as json")
distNetViz.viz(omd_path, outputPath=temp_dir, outputName=filenames["dv"], open_file=False)
@app.route("/distributionViz/<path:temp_dir>", methods=['GET', 'DELETE'])
@get_status
def distributionViz_status(temp_dir):
dv_path = os.path.join(temp_dir, filenames['dv'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('distributionViz_status', temp_dir=temp_dir)
download_url = url_for("distributionViz_download", temp_dir=temp_dir)
if os.path.isfile(dv_path):
return (os.path.getmtime(dv_path), status_url, download_url)
return (None, status_url, download_url)
@app.route("/distributionViz/<path:temp_dir>/download")
@get_download
def distributionViz_download(temp_dir):
return send_from_directory(temp_dir, filenames["dv"])
@app.route('/glmForceLayout', methods=['POST'])
@start_process(inputs_metadata=({'name': 'glm', 'required': True, 'type': 'file'},))
def glmForceLayout_start(temp_dir):
p = Process(target=glmForceLayout, args=(temp_dir,))
p.start()
return url_for('glmForceLayout_status', temp_dir=_get_rel_path(temp_dir))
@try_except
def glmForceLayout(temp_dir):
'''
Inject artificial coordinates into a GridLAB-D .glm and return the .glm.
Form parameters:
:param glm: a GLM file
Details:
:OMF function: omf.distNetViz.insert_coordinates()
:run-time: a few seconds
'''
glm_path = os.path.join(temp_dir, 'in.glm')
glm_file = request.files['glm']
glm_file.save(glm_path)
tree = feeder.parse(glm_path)
distNetViz.insert_coordinates(tree)
with open(os.path.join(temp_dir, filenames['gfl']), 'w') as f:
f.write(feeder.sortedWrite(tree))
@app.route('/glmForceLayout/<path:temp_dir>', methods=['GET', 'DELETE'])
@get_status
def glmForceLayout_status(temp_dir):
glm_path = os.path.join(temp_dir, filenames['gfl'])
temp_dir = _get_rel_path(temp_dir)
status_url = url_for('glmForceLayout_status', temp_dir=temp_dir)
download_url = url_for('glmForceLayout_download', temp_dir=temp_dir)
if os.path.isfile(glm_path):
return (os.path.getmtime(glm_path), status_url, download_url)
return (None, status_url, download_url)
@app.route('/glmForceLayout/<path:temp_dir>/download')
@get_download
def glmForceLayout_download(temp_dir):
return send_from_directory(temp_dir, filenames['gfl'], mimetype='text/plain')
def serve_production():
'''
- Make sure to run this file with the -m (module) flag
- One way to kill gunicorn is with $ ps -ef | awk '/gunicorn/ {print $2}' | xargs kill
'''
os.chdir(os.path.dirname(__file__))
subprocess.call(["gunicorn", "-w", "4", "-b", "0.0.0.0:5100", "--preload", "-k sync", "grip:app"])
def serve_development():
'''gevent does NOT work with multiprocessing. Don't use gevent or gunicorn with gevent in this version of the API.'''
app.run(debug=False, port=5100)
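# Launch sketch (module name inferred from the 'grip:app' gunicorn target above):
#   python -m grip    # production, via gunicorn on port 5100
# or call serve_development() for a single-process Flask dev server.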
if __name__ == '__main__':
serve_production()
| gpl-2.0 |
billy-inn/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
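# Minimal usage sketch of the two estimators compared below (illustrative only):
# MinCovDet().fit(X).covariance_ gives the robust estimate, while
# EmpiricalCovariance().fit(X).covariance_ gives the classical MLE estimate.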
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
akrherz/iem | scripts/hads/compute_hads_pday.py | 1 | 3548 | """Attempt at totalling up DCP data
Run from `RUN_12Z.sh` for previous day
Run from `RUN_20_AFTER.sh` for current day
"""
import datetime
import sys
import pytz
import numpy as np
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn, utc, logger
LOG = logger()
def workflow(date):
"""Do the necessary work for this date"""
pgconn = get_dbconn("hads", user="nobody")
iem_pgconn = get_dbconn("iem")
icursor = iem_pgconn.cursor()
# load up the current obs
df = read_sql(
f"""
WITH dcp as (
SELECT id, iemid, tzname from stations where network ~* 'DCP'
and tzname is not null
), obs as (
SELECT iemid, pday from summary_{date.year}
WHERE day = %s)
SELECT d.id, d.iemid, d.tzname, coalesce(o.pday, 0) as pday from
dcp d LEFT JOIN obs o on (d.iemid = o.iemid)
""",
iem_pgconn,
params=(date,),
index_col="id",
)
bases = {}
ts = utc(date.year, date.month, date.day, 12)
for tzname in df["tzname"].unique():
base = ts.astimezone(pytz.timezone(tzname))
bases[tzname] = base.replace(hour=0)
# retrieve data that is within 12 hours of our bounds
sts = datetime.datetime(
date.year, date.month, date.day
) - datetime.timedelta(hours=12)
ets = sts + datetime.timedelta(hours=48)
obsdf = read_sql(
f"""
SELECT distinct station, valid at time zone 'UTC' as utc_valid, value
from raw{date.year} WHERE valid between %s and %s and
substr(key, 1, 3) = 'PPH' and value >= 0
""",
pgconn,
params=(sts, ets),
index_col=None,
)
if obsdf.empty:
LOG.info("%s found no data", date)
return
obsdf["utc_valid"] = obsdf["utc_valid"].dt.tz_localize(pytz.UTC)
precip = np.zeros((24 * 60))
grouped = obsdf.groupby("station")
for station in obsdf["station"].unique():
if station not in df.index:
continue
precip[:] = 0
tz = df.loc[station, "tzname"]
current_pday = df.loc[station, "pday"]
for _, row in grouped.get_group(station).iterrows():
ts = row["utc_valid"].to_pydatetime()
if ts <= bases[tz]:
continue
t1 = (ts - bases[tz]).total_seconds() / 60.0
t0 = max([0, t1 - 60.0])
precip[int(t0) : int(t1)] = row["value"] / 60.0
pday = np.sum(precip)
if pday > 50 or np.allclose([pday], [current_pday]):
continue
iemid = int(df.loc[station, "iemid"])
icursor.execute(
f"UPDATE summary_{date.year} "
"SET pday = %s WHERE iemid = %s and day = %s",
(pday, iemid, date),
)
if icursor.rowcount == 0:
LOG.info("Adding record %s[%s] for day %s", station, iemid, date)
icursor.execute(
f"INSERT into summary_{date.year} "
"(iemid, day) VALUES (%s, %s)",
(iemid, date),
)
icursor.execute(
f"UPDATE summary_{date.year} "
"SET pday = %s WHERE iemid = %s and day = %s "
"and %s > coalesce(pday, 0)",
(pday, iemid, date, pday),
)
icursor.close()
iem_pgconn.commit()
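# Worked example of the minute-bucket accumulation above (illustrative numbers):
# a PPH observation of 0.60 valid at 01:00 local time fills precip[0:60] with
# 0.60 / 60 per minute, so np.sum(precip) recovers 0.60 for that local day.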
def main(argv):
"""Do Something"""
if len(argv) == 4:
ts = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
else:
ts = datetime.date.today()
workflow(ts)
if __name__ == "__main__":
main(sys.argv)
| mit |
saullocastro/pyNastran | pyNastran/op2/tables/oes_stressStrain/oes_nonlinear.py | 1 | 37310 | from __future__ import (nested_scopes, generators, division, absolute_import,
print_function, unicode_literals)
from six import iteritems
from six.moves import range
from itertools import cycle
from math import isnan
import numpy as np
from numpy import zeros, array_equal
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import StressObject, StrainObject, OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header, write_float_13e
try:
import pandas as pd
except ImportError:
pass
class RealNonlinearRodArray(OES_Object): # 89-CRODNL, 92-CONRODNL
"""
::
ELEMENT-ID = 102
N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )
TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL
STRESS PLASTIC/NLELAST STRAIN STRESS
2.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
3.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
self.nelements = 0 # result specific
def is_real(self):
return True
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def _get_msgs(self):
raise NotImplementedError()
def get_headers(self):
headers = ['axial_stress', 'equiv_stress', 'total_strain',
'effective_plastic_creep_strain', 'effective_creep_strain',
'linear_torsional_stress']
return headers
def build(self):
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, int):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
# effective_creep_strain, linear_torsional_stress]
self.data = zeros((self.ntimes, self.nelements, 6), dtype='float32')
def build_dataframe(self):
headers = self.get_headers()
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data, items=column_values, major_axis=self.element, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'Item']
else:
df1 = pd.DataFrame(self.element).T
df1.columns = ['ElementID']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
self.data_frame = df1.join([df2])
#print(self.data_frame)
def __eq__(self, table):
self._eq_header(table)
assert self.is_sort1() == table.is_sort1()
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1():
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1) = t1
(axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s)\n' % (
eid,
axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1,
axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2())
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial_stress, equiv_stress, total_strain,
effective_plastic_creep_strain, effective_creep_strain, linear_torsional_stress):
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
effective_creep_strain, linear_torsional_stress
]
self.ielement += 1
def get_stats(self):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor is not None: # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
if is_sort1:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' ELEMENT-ID AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
else:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
if self.is_sort1():
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f, msg)
else:
raise NotImplementedError('RealNonlinearRodArray')
return page_num
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f, msg_temp):
ntimes = self.data.shape[0]
eids = self.element
is_odd = False
nwrite = len(eids)
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
eqs = self.data[itime, :, 1]
total = self.data[itime, :, 2]
epcs = self.data[itime, :, 3]
ecs = self.data[itime, :, 4]
lts = self.data[itime, :, 5]
#print "dt=%s axials=%s eqs=%s ts=%s epcs=%s ecs=%s lts=%s" %(dt,axial,eqs,ts,epcs,ecs,lts)
#msgE[eid] = ' ELEMENT-ID = %8i\n' % (eid)
#if eid not in msgT:
#msgT[eid] = []
#msgT[eid].append(' %9.3E %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (dt, axial, eqs, ts, epcs, ecs, lts))
for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids, axial, eqs, total, epcs, ecs, lts):
([saxial, seqs, stotal, sepcs, secs, slts]) = write_floats_13e(
[axiali, eqsi, totali, epcsi, ecsi, ltsi])
f.write(' %8i %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, saxial, seqs, stotal, sepcs, secs, slts))
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class RealNonlinearPlateArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.ielement = 0
self.nelements = 0 # result specific
self.nnodes = None
def is_real(self):
return True
def is_complex(self):
return False
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def is_stress(self):
return True
def get_headers(self):
headers = [
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
'fiber_distance', 'oxx', 'oyy', 'ozz', 'txy',
'eff_plastic_strain', 'eff_plastic_strain', 'eff_creep_strain',
'exx', 'eyy', 'ezz', 'exy',
]
return headers
#def is_bilinear(self):
#if self.element_type in [33, 74]: # CQUAD4, CTRIA3
#return False
#elif self.element_type in [144, 64, 82, 70, 75]: # CQUAD4
#return True
#else:
#raise NotImplementedError('name=%s type=%s' % (self.element_name, self.element_type))
def build(self):
#print("self.ielement = %s" % self.ielement)
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
if self.is_built:
return
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
#if self.element_type in [33, 74]:
#nnodes_per_element = 1
#elif self.element_type == 144:
#nnodes_per_element = 5
#elif self.element_type == 64: # CQUAD8
#nnodes_per_element = 5
#elif self.element_type == 82: # CQUADR
#nnodes_per_element = 5
#elif self.element_type == 70: # CTRIAR
#nnodes_per_element = 4
#elif self.element_type == 75: # CTRIA6
#nnodes_per_element = 4
#else:
#raise NotImplementedError('name=%r type=%s' % (self.element_name, self.element_type))
nnodes_per_element = 1
self.nnodes = nnodes_per_element
#self.nelements //= nnodes_per_element
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("***name=%s type=%s nnodes_per_element=%s ntimes=%s nelements=%s ntotal=%s" % (
#self.element_name, self.element_type, nnodes_per_element, self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, int):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element_node = zeros((self.ntotal, 2), dtype='int32')
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
self.data = zeros((self.ntimes, self.ntotal, 12), dtype='float32')
def build_dataframe(self):
headers = self.get_headers()[1:]
nelements = self.element_node.shape[0] // 2
if self.is_fiber_distance():
fiber_distance = ['Top', 'Bottom'] * nelements
else:
fiber_distance = ['Mean', 'Curvature'] * nelements
fd = np.array(fiber_distance, dtype='unicode')
element_node = [self.element_node[:, 0], self.element_node[:, 1], fd]
if self.nonlinear_factor is not None:
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = pd.Panel(self.data[:, :, 1:], items=column_values, major_axis=element_node, minor_axis=headers).to_frame()
self.data_frame.columns.names = column_names
self.data_frame.index.names = ['ElementID', 'NodeID', 'Location', 'Item']
else:
# option B - nice!
df1 = pd.DataFrame(element_node).T
df1.columns = ['ElementID', 'NodeID', 'Location']
df2 = pd.DataFrame(self.data[0, :, 1:])
df2.columns = headers
self.data_frame = df1.join(df2)
self.data_frame = self.data_frame.reset_index().replace({'NodeID': {0:'CEN'}}).set_index(['ElementID', 'NodeID', 'Location'])
#print(self.data_frame)
def add_new_eid(self, dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
self.add_sort1(dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy)
def add_new_eid_sort1(self, dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
self.add_sort1(dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy)
def add_sort1(self, dt, eid, etype, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
#print(etype, eid)
if isnan(fd):
fd = 0.
if isnan(sz):
sz = 0.
if isnan(ez):
ez = 0.
self._times[self.itime] = dt
self.element_node[self.ielement, 0] = eid
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
self.data[self.itime, self.ielement, :] = [fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy]
self.ielement += 1
self.itotal += 1
def __eq__(self, table):
self._eq_header(table)
assert self.is_sort1() == table.is_sort1()
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ie, e in enumerate(self.element_node):
(eid, nid) = e
t1 = self.data[itime, ie, :]
t2 = table.data[itime, ie, :]
# TODO: this name order is wrong
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
(fiber_distance1, oxx1, oyy1, ozz1, txy1, exx1, eyy1, ezz1, exy1, es1, eps1, ecs1) = t1
(fiber_distance2, oxx2, oyy2, ozz2, txy2, exx2, eyy2, ezz2, exy2, es2, eps2, ecs2) = t2
# vm stress can be NaN for some reason...
if not np.array_equal(t1, t2):
msg += ('(%s, %s) (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n'
'%s (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\n' % (
eid, nid,
fiber_distance1, oxx1, oyy1, ozz1, txy1, exx1, eyy1, ezz1, exy1, es1, eps1, ecs1,
' ' * (len(str(eid)) + len(str(nid)) + 3),
fiber_distance2, oxx2, oyy2, ozz2, txy2, exx2, eyy2, ezz2, exy2, es2, eps2, ecs2))
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def get_stats(self):
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
nelements = self.nelements
ntimes = self.ntimes
nnodes = self.nnodes
ntotal = self.ntotal
nlayers = 2
nelements = self.ntotal // self.nnodes // 2
msg = []
if self.nonlinear_factor is not None: # transient
msgi = ' type=%s ntimes=%i nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, ntimes, nelements, nnodes, nlayers, ntotal)
ntimes_word = 'ntimes'
else:
msgi = ' type=%s nelements=%i nnodes_per_element=%i nlayers=%i ntotal=%i\n' % (
self.__class__.__name__, nelements, nnodes, nlayers, ntotal)
ntimes_word = '1'
msg.append(msgi)
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, ntotal, %i] where %i=[%s]\n' % (ntimes_word, n, n,
str(', '.join(headers))))
msg.append(' data.shape=%s\n' % str(self.data.shape))
msg.append(' element type: %s\n ' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
#msg, nnodes, cen = _get_plate_msg(self)
if self.element_type == 88:
msg = [
' N O N L I N E A R S T R E S S E S I N T R I A N G U L A R E L E M E N T S ( T R I A 3 )\n'
' \n'
' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
]
elif self.element_type == 90:
msg = [
' N O N L I N E A R S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
' \n'
' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
#' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
#' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
#' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
]
else:
raise NotImplementedError('element_name=%s self.element_type=%s' % (self.element_name, self.element_type))
#msg = [
#' N O N L I N E A R S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
#' \n'
#' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
#' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
##'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
##' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
##' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
##' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
#]
# write the f06
ntimes = self.data.shape[0]
eids = self.element_node[:, 0]
nids = self.element_node[:, 1]
#cen_word = 'CEN/%i' % nnodes
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f.write(''.join(header + msg))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
#[fiber_dist, oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, etxy]
fiber_dist = self.data[itime, :, 0]
oxx = self.data[itime, :, 1]
oyy = self.data[itime, :, 2]
ozz = self.data[itime, :, 3]
txy = self.data[itime, :, 4]
es = self.data[itime, :, 5]
eps = self.data[itime, :, 6]
ecs = self.data[itime, :, 7]
exx = self.data[itime, :, 8]
eyy = self.data[itime, :, 9]
ezz = self.data[itime, :, 10]
exy = self.data[itime, :, 11]
for (i, eid, nid, fdi, oxxi, oyyi, ozzi, txyi, exxi, eyyi, ezzi, exyi, esi, epsi, ecsi) in zip(
cycle([0, 1]), eids, nids, fiber_dist, oxx, oyy, ozz, txy, exx, eyy, ezz, exy, es, eps, ecs):
#[fdi, oxxi, oyyi, txyi, major, minor, ovmi] = write_floats_13e(
#[fdi, oxxi, oyyi, txyi, major, minor, ovmi])
#' ELEMENT FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
#' ID DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
#'0 1 -2.500000E-02 -4.829193E+00 -1.640651E-05 -1.907010E-04 4.829185E+00 0.0 0.0\n'
#' -4.829188E-05 1.448741E-05 -4.958226E-09\n'
#' 2.500000E-02 4.770547E+00 1.493975E-04 1.907012E-04 4.770473E+00 0.0 0.0\n'
#' 4.770502E-05 -1.431015E-05 4.958231E-09\n'
if i == 0:
f.write(
'0 %8i %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %s\n' % (
# A
#eid, write_float_13e(fdi),
#write_float_13e(oxxi), write_float_13e(oyyi),
##write_float_13e(ozzi),
#write_float_13e(txyi),
#write_float_13e(esi), write_float_13e(epsi),
#write_float_13e(ecsi),
#write_float_13e(exxi), write_float_13e(eyyi),
##write_float_13e(ezzi),
#write_float_13e(exyi),
# ELEMENT FIBER XYZ STRESS EQUIVALENT EFF.STRAIN EFF.CREEP\n'
eid, write_float_13e(fdi),
write_float_13e(oxxi), write_float_13e(oyyi),
#write_float_13e(ozzi),
write_float_13e(txyi),
write_float_13e(esi), write_float_13e(epsi),
write_float_13e(ecsi),
write_float_13e(exxi), write_float_13e(eyyi),
#write_float_13e(ezzi),
write_float_13e(exyi),
))
else:
f.write(
' %-13s %-13s %-13s %-13s %-13s %-13s %s\n'
' %-13s %-13s %s\n' % (
write_float_13e(fdi),
write_float_13e(oxxi), write_float_13e(oyyi),
#write_float_13e(ozzi),
write_float_13e(txyi),
write_float_13e(esi), write_float_13e(epsi),
write_float_13e(ecsi),
write_float_13e(exxi), write_float_13e(eyyi),
#write_float_13e(ezzi),
write_float_13e(exyi),
))
f.write(page_stamp % page_num)
page_num += 1
return page_num - 1
class NonlinearQuad(StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
StressObject.__init__(self, data_code, isubcase)
#self.eType = 'QUAD4FD' # or CTRIA3
self.code = [self.format_code, self.sort_code, self.s_code]
self.eType = {}
self.fiberDistance = {}
self.oxx = {}
self.oyy = {}
self.ozz = {}
self.txy = {}
self.exx = {}
self.eyy = {}
self.ezz = {}
self.exy = {}
self.es = {}
self.eps = {}
self.ecs = {}
self.dt = dt
if is_sort1:
if dt is not None:
self.add = self.add_sort1
self.add_new_eid = self.add_new_eid_sort1
else:
assert dt is not None
#self.add = self.add_sort2
#self.add_new_eid = self.add_new_eid_sort2
def get_stats(self):
nelements = len(self.eType)
msg = self.get_data_code()
if self.nonlinear_factor is not None: # transient
ntimes = len(self.oxx)
msg.append(' type=%s ntimes=%s nelements=%s\n'
% (self.__class__.__name__, ntimes, nelements))
else:
msg.append(' type=%s nelements=%s\n' % (self.__class__.__name__,
nelements))
msg.append(' eType, fiberDistance, oxx, oyy, ozz, txy, '
'exx, eyy, ezz, exy, es, eps, ecs\n')
return msg
def delete_transient(self, dt):
del self.fiberDistance[dt]
del self.oxx[dt]
del self.oyy[dt]
del self.ozz[dt]
del self.txy[dt]
del self.exx[dt]
del self.eyy[dt]
del self.ezz[dt]
del self.exy[dt]
del self.es[dt]
del self.eps[dt]
del self.ecs[dt]
def get_transients(self):
        return sorted(self.oxx.keys())
def add_new_transient(self, dt):
self.fiberDistance[dt] = {}
self.oxx[dt] = {}
self.oyy[dt] = {}
self.ozz[dt] = {}
self.txy[dt] = {}
self.exx[dt] = {}
self.eyy[dt] = {}
self.ezz[dt] = {}
self.exy[dt] = {}
self.es[dt] = {}
self.eps[dt] = {}
self.ecs[dt] = {}
def add_new_eid_sort1(self, dt, eid, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
if dt not in self.oxx:
self.add_new_transient(dt)
self.fiberDistance[dt][eid] = [fd]
if isnan(sz):
sz = 0.
if isnan(ez):
ez = 0.
self.oxx[dt][eid] = [sx]
self.oyy[dt][eid] = [sy]
self.ozz[dt][eid] = [sz]
self.txy[dt][eid] = [txy]
self.exx[dt][eid] = [ex]
self.eyy[dt][eid] = [ey]
self.ezz[dt][eid] = [ez]
self.exy[dt][eid] = [exy]
self.es[dt][eid] = [es]
self.eps[dt][eid] = [eps]
self.ecs[dt][eid] = [ecs]
def add_sort1(self, dt, eid, fd, sx, sy, sz, txy, es, eps, ecs, ex, ey, ez, exy):
self.fiberDistance[dt][eid].append(fd)
if isnan(sz):
sz = 0.
if isnan(ez):
ez = 0.
self.oxx[dt][eid].append(sx)
self.oyy[dt][eid].append(sy)
self.ozz[dt][eid].append(sz)
self.txy[dt][eid].append(txy)
self.exx[dt][eid].append(ex)
self.eyy[dt][eid].append(ey)
self.ezz[dt][eid].append(ez)
self.exy[dt][eid].append(exy)
self.es[dt][eid].append(es)
self.eps[dt][eid].append(eps)
self.ecs[dt][eid].append(ecs)
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
msg_start = [
' ELEMENT-ID = 129\n'
' N O N L I N E A R S T R E S S E S I N Q U A D R I L A T E R A L E L E M E N T S ( Q U A D 4 )\n'
' \n',
' TIME FIBER STRESSES/ TOTAL STRAINS EQUIVALENT EFF. STRAIN EFF. CREEP\n'
' DISTANCE X Y Z XY STRESS PLASTIC/NLELAST STRAIN\n'
]
#0 5.000E-05 -5.000000E-01 -4.484895E+01 -1.561594E+02 -2.008336E-02 1.392609E+02 0.0 0.0
msg_element = {}
msg_time = {}
for (dt, Oxxs) in sorted(iteritems(self.oxx)):
header[1] = ' %s = %10.4E\n' % (self.data_code['name'], dt)
for (eid, oxxs) in sorted(iteritems(Oxxs)):
msg_element[eid] = header + [' ELEMENT-ID = %8i\n' % (eid)]
if eid not in msg_time:
msg_time[eid] = []
for i, oxx in enumerate(oxxs):
fd = self.fiberDistance[dt][eid][i]
oxx = self.oxx[dt][eid][i]
oyy = self.oyy[dt][eid][i]
ozz = self.ozz[dt][eid][i]
txy = self.txy[dt][eid][i]
exx = self.exx[dt][eid][i]
eyy = self.eyy[dt][eid][i]
ezz = self.ezz[dt][eid][i]
exy = self.exy[dt][eid][i]
es = self.es[dt][eid][i]
eps = self.eps[dt][eid][i]
ecs = self.ecs[dt][eid][i]
                    [oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, exy] = write_floats_13e(
                        [oxx, oyy, ozz, txy, es, eps, ecs, exx, eyy, ezz, exy])
if i == 0:
msg_time[eid].append('0 %9.3E %-13s %-13s %-13s %-13s %-13s %-13s %-13s %s\n' % (dt, fd, oxx, oyy, ozz, txy, es, eps, ecs))
else:
msg_time[eid].append(' %9s %-13s %-13s %-13s %-13s %-13s\n' % ('', '', exx, eyy, ezz, exy))
msg = []
for eid, e in sorted(iteritems(msg_element)):
msg += header + e + msg_start + msg_time[eid]
msg.append(page_stamp % page_num)
page_num += 1
f.write(''.join(msg))
return page_num - 1
class HyperelasticQuad(StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
StressObject.__init__(self, data_code, isubcase)
self.eType = 'QUAD4FD'
self.code = [self.format_code, self.sort_code, self.s_code]
self.Type = {}
self.IDs = {}
self.oxx = {}
self.oyy = {}
self.txy = {}
self.angle = {}
self.majorP = {}
self.minorP = {}
self.dt = dt
if is_sort1:
if dt is not None:
self.add = self.add_sort1
self.add_new_eid = self.add_new_eid_sort1
else:
assert dt is not None
#self.add = self.add_sort2
#self.add_new_eid = self.add_new_eid_sort2
def get_stats(self):
nelements = len(self.eType)
msg = self.get_data_code()
if self.nonlinear_factor is not None: # transient
ntimes = len(self.oxx)
msg.append(' type=%s ntimes=%s nelements=%s\n'
% (self.__class__.__name__, ntimes, nelements))
else:
msg.append(' type=%s nelements=%s\n' % (self.__class__.__name__,
nelements))
msg.append(' Type, oxx, oyy, txy, angle, majorP, minorP\n')
return msg
def delete_transient(self, dt):
del self.oxx[dt]
del self.oyy[dt]
del self.txy[dt]
del self.angle[dt]
del self.majorP[dt]
del self.minorP[dt]
def get_transients(self):
        return sorted(self.oxx.keys())
def add_new_transient(self, dt):
self.oxx[dt] = {}
self.oyy[dt] = {}
self.txy[dt] = {}
self.angle[dt] = {}
self.majorP[dt] = {}
self.minorP[dt] = {}
def add_new_eid_sort1(self, dt, eid, Type, oxx, oyy, txy, angle, majorP, minorP):
if dt not in self.oxx:
self.add_new_transient(dt)
self.Type[eid] = Type
        self.oxx[dt][eid] = [oxx]
        self.oyy[dt][eid] = [oyy]
        self.txy[dt][eid] = [txy]
        self.angle[dt][eid] = [angle]
        self.majorP[dt][eid] = [majorP]
        self.minorP[dt][eid] = [minorP]
def add_sort1(self, dt, eid, ID, oxx, oyy, txy, angle, majorP, minorP):
self.oxx[dt][eid].append(oxx)
self.oyy[dt][eid].append(oyy)
self.txy[dt][eid].append(txy)
self.angle[dt][eid].append(angle)
self.majorP[dt][eid].append(majorP)
self.minorP[dt][eid].append(minorP)
def write_f06(self, f, header=None, page_stamp='PAGE %s', page_num=1, is_mag_phase=False, is_sort1=True):
        # .. todo:: doesn't support CTRIA3NL (calls them CQUAD4s)
if header is None:
header = []
msg = [' S T R E S S E S I N H Y P E R E L A S T I C Q U A D R I L A T E R A L E L E M E N T S ( QUAD4FD )\n',
' ELEMENT GRID/ POINT ---------CAUCHY STRESSES-------- PRINCIPAL STRESSES (ZERO SHEAR)\n',
' ID GAUSS ID NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR\n', ]
#0 1 GAUS 1 7.318995E+00 6.367099E-01 -6.551054E+00 -31.4888 1.133173E+01 -3.376026E+00
# 2 1.097933E+01 4.149028E+00 6.278160E+00 30.7275 1.471111E+01 4.172537E-01
for dt, oxxs in sorted(iteritems(self.oxx)):
#header[-1] = ' LOAD STEP = %12.5E' %(dt)
msg += header
for eid, oxxs in sorted(iteritems(oxxs)):
gauss = self.Type[eid]
oxx = self.oxx[dt][eid]
oyy = self.oyy[dt][eid]
txy = self.txy[dt][eid]
angle = self.angle[dt][eid]
majorP = self.majorP[dt][eid]
minorP = self.minorP[dt][eid]
for i in range(4): # 1,2,3,4
if i == 0:
                        msg.append('0%8i %8s %8i %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (eid, gauss, i + 1, oxx[i], oyy[i], txy[i], angle[i], majorP[i], minorP[i]))
else:
                        msg.append(' %8s %8s %8i %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % ('', '', i + 1, oxx[i], oyy[i], txy[i], angle[i], majorP[i], minorP[i]))
f.write(''.join(msg))
return page_num
| lgpl-3.0 |
teamCarel/EyeTracker | src/capture/recorder.py | 1 | 17948 | '''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os, errno
# import sys, platform, getpass
import csv_utils
from pyglui import ui
import numpy as np
# from scipy.interpolate import UnivariateSpline
from plugin import Plugin
from time import strftime, localtime, time, gmtime
from shutil import copy2
from audio import Audio_Input_Dict
from file_methods import save_object, load_object
from methods import get_system_info
from av_writer import JPEG_Writer, AV_Writer, Audio_Capture
from calibration_routines.camera_intrinsics_estimation import load_camera_calibration
# logging
import logging
logger = logging.getLogger(__name__)
def get_auto_name():
return strftime("%Y_%m_%d", localtime())
# def sanitize_timestamps(ts):
# logger.debug("Checking %s timestamps for monotony in direction and smoothness"%ts.shape[0])
# avg_frame_time = (ts[-1] - ts[0])/ts.shape[0]
# logger.debug('average_frame_time: %s'%(1./avg_frame_time))
# raw_ts = ts #only needed for visualization
# runs = 0
# while True:
# #forward check for non monotonic increasing behaviour
# clean = np.ones((ts.shape[0]),dtype=np.bool)
# damper = 0
# for idx in range(ts.shape[0]-1):
# if ts[idx] >= ts[idx+1]: #not monotonically increasing timestamp
# damper = 50
# clean[idx] = damper <= 0
# damper -=1
# #backward check to smooth timejumps forward
# damper = 0
# for idx in range(ts.shape[0]-1)[::-1]:
# if ts[idx+1]-ts[idx]>1: #more than one second forward jump
# damper = 50
# clean[idx] &= damper <= 0
# damper -=1
# if clean.all() == True:
# if runs >0:
# logger.debug("Timestamps were bad but are ok now. Correction runs: %s"%runs)
# # from matplotlib import pyplot as plt
# # plt.plot(frames,raw_ts)
# # plt.plot(frames,ts)
# # # plt.scatter(frames[~clean],ts[~clean])
# # plt.show()
# else:
# logger.debug("Timestamps are clean.")
# return ts
# runs +=1
# if runs > 4:
# logger.error("Timestamps could not be fixed!")
# return ts
# logger.warning("Timestamps are not sane. We detected non monotitc or jumpy timestamps. Fixing them now")
# frames = np.arange(len(ts))
# s = UnivariateSpline(frames[clean],ts[clean],s=0)
# ts = s(frames)
class Recorder(Plugin):
"""Capture Recorder"""
def __init__(self, g_pool, session_name=get_auto_name(), rec_dir=None,
user_info={'name': '', 'additional_field': 'change_me'},
info_menu_conf={}, show_info_menu=False, record_eye=False,
audio_src='No Audio', raw_jpeg=True):
super().__init__(g_pool)
# update name if it was autogenerated.
if session_name.startswith('20') and len(session_name) == 10:
session_name = get_auto_name()
base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
default_rec_dir = os.path.join(base_dir, 'recordings')
if rec_dir and rec_dir != default_rec_dir and self.verify_path(rec_dir):
self.rec_dir = rec_dir
else:
try:
os.makedirs(default_rec_dir)
except OSError as e:
if e.errno != errno.EEXIST:
logger.error("Could not create Rec dir")
raise e
else:
logger.info('Created standard Rec dir at "{}"'.format(default_rec_dir))
self.rec_dir = default_rec_dir
self.raw_jpeg = raw_jpeg
self.order = .9
self.record_eye = record_eye
self.session_name = session_name
self.audio_devices_dict = Audio_Input_Dict()
if audio_src in list(self.audio_devices_dict.keys()):
self.audio_src = audio_src
else:
self.audio_src = 'No Audio'
self.running = False
self.menu = None
self.button = None
self.user_info = user_info
self.show_info_menu = show_info_menu
self.info_menu = None
self.info_menu_conf = info_menu_conf
def get_init_dict(self):
d = {}
d['record_eye'] = self.record_eye
d['audio_src'] = self.audio_src
d['session_name'] = self.session_name
d['user_info'] = self.user_info
d['info_menu_conf'] = self.info_menu_conf
d['show_info_menu'] = self.show_info_menu
d['rec_dir'] = self.rec_dir
d['raw_jpeg'] = self.raw_jpeg
return d
def init_gui(self):
self.menu = ui.Growing_Menu('Recorder')
self.menu.collapsed = True
self.g_pool.sidebar.insert(3, self.menu)
self.menu.append(ui.Info_Text('Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'))
self.menu.append(ui.Info_Text('Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'))
self.menu.append(ui.Text_Input('rec_dir', self, setter=self.set_rec_dir, label='Path to recordings'))
self.menu.append(ui.Text_Input('session_name', self, setter=self.set_session_name, label='Recording session name'))
self.menu.append(ui.Switch('show_info_menu', self, on_val=True, off_val=False, label='Request additional user info'))
self.menu.append(ui.Selector('raw_jpeg', self, selection=[True, False], labels=["bigger file, less CPU", "smaller file, more CPU"], label='Compression'))
self.menu.append(ui.Info_Text('Recording the raw eye video is optional. We use it for debugging.'))
self.menu.append(ui.Switch('record_eye', self, on_val=True, off_val=False, label='Record eye'))
def audio_dev_getter():
# fetch list of currently available
self.audio_devices_dict = Audio_Input_Dict()
devices = list(self.audio_devices_dict.keys())
return devices, devices
self.menu.append(ui.Selector('audio_src', self, selection_getter=audio_dev_getter, label='Audio Source'))
self.button = ui.Thumb('running', self, setter=self.toggle, label='R', hotkey='r')
self.button.on_color[:] = (1, .0, .0, .8)
self.g_pool.quickbar.insert(1, self.button)
def deinit_gui(self):
if self.menu:
self.g_pool.sidebar.remove(self.menu)
self.menu = None
if self.button:
self.g_pool.quickbar.remove(self.button)
self.button = None
def toggle(self, _=None):
if self.running:
self.notify_all({'subject': 'recording.should_stop'})
self.notify_all({'subject': 'recording.should_stop', 'remote_notify': 'all'})
else:
self.notify_all({'subject': 'recording.should_start', 'session_name': self.session_name})
self.notify_all({'subject': 'recording.should_start', 'session_name': self.session_name, 'remote_notify': 'all'})
def on_notify(self, notification):
"""Handles recorder notifications
Reacts to notifications:
``recording.should_start``: Starts a new recording session
``recording.should_stop``: Stops current recording session
Emits notifications:
``recording.started``: New recording session started
``recording.stopped``: Current recording session stopped
Args:
notification (dictionary): Notification dictionary
"""
# notification wants to be recorded
if notification.get('record', False) and self.running:
if 'timestamp' not in notification:
logger.error("Notification without timestamp will not be saved.")
else:
self.data['notifications'].append(notification)
elif notification['subject'] == 'recording.should_start':
if self.running:
logger.info('Recording already running!')
elif not self.g_pool.capture.online:
logger.error("Current world capture is offline. Please reconnect or switch to fake capture")
else:
if notification.get("session_name", ""):
self.set_session_name(notification["session_name"])
self.start()
elif notification['subject'] == 'recording.should_stop':
if self.running:
self.stop()
else:
logger.info('Recording already stopped!')
def get_rec_time_str(self):
rec_time = gmtime(time()-self.start_time)
return strftime("%H:%M:%S", rec_time)
def start(self):
self.timestamps = []
self.data = {'pupil_positions': [], 'gaze_positions': [], 'notifications': []}
self.frame_count = 0
self.running = True
self.menu.read_only = True
self.start_time = time()
session = os.path.join(self.rec_dir, self.session_name)
try:
os.makedirs(session)
logger.debug("Created new recordings session dir {}".format(session))
except:
logger.debug("Recordings session dir {} already exists, using it.".format(session))
# set up self incrementing folder within session folder
counter = 0
while True:
self.rec_path = os.path.join(session, "{:03d}/".format(counter))
try:
os.mkdir(self.rec_path)
logger.debug("Created new recording dir {}".format(self.rec_path))
break
except:
logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
counter += 1
self.meta_info_path = os.path.join(self.rec_path, "info.csv")
with open(self.meta_info_path, 'w', newline='') as csvfile:
csv_utils.write_key_value_file(csvfile, {
'Recording Name': self.session_name,
'Start Date': strftime("%d.%m.%Y", localtime(self.start_time)),
'Start Time': strftime("%H:%M:%S", localtime(self.start_time))
})
if self.audio_src != 'No Audio':
audio_path = os.path.join(self.rec_path, "world.wav")
self.audio_writer = Audio_Capture(audio_path, self.audio_devices_dict[self.audio_src])
else:
self.audio_writer = None
if self.raw_jpeg and self.g_pool.capture.jpeg_support:
self.video_path = os.path.join(self.rec_path, "world.mp4")
self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
else:
self.video_path = os.path.join(self.rec_path, "world.mp4")
self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)
try:
cal_pt_path = os.path.join(self.g_pool.user_dir, "user_calibration_data")
cal_data = load_object(cal_pt_path)
notification = {'subject': 'calibration.calibration_data', 'record': True}
notification.update(cal_data)
self.data['notifications'].append(notification)
except:
pass
if self.show_info_menu:
self.open_info_menu()
logger.info("Started Recording.")
self.notify_all({'subject': 'recording.started', 'rec_path': self.rec_path,
'session_name': self.session_name, 'record_eye': self.record_eye,
'compression': self.raw_jpeg})
def open_info_menu(self):
self.info_menu = ui.Growing_Menu('additional Recording Info', size=(300, 300), pos=(300, 300))
self.info_menu.configuration = self.info_menu_conf
def populate_info_menu():
self.info_menu.elements[:-2] = []
for name in self.user_info.keys():
self.info_menu.insert(0, ui.Text_Input(name, self.user_info))
def set_user_info(new_string):
self.user_info = new_string
populate_info_menu()
populate_info_menu()
self.info_menu.append(ui.Info_Text('Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'))
self.info_menu.append(ui.Text_Input('user_info', self, setter=set_user_info, label="User info"))
self.g_pool.gui.append(self.info_menu)
def close_info_menu(self):
if self.info_menu:
self.info_menu_conf = self.info_menu.configuration
self.g_pool.gui.remove(self.info_menu)
self.info_menu = None
def recent_events(self,events):
if self.running:
for key, data in events.items():
if key not in ('dt','frame'):
try:
self.data[key] += data
except KeyError:
self.data[key] = []
self.data[key] += data
if 'frame' in events:
frame = events['frame']
self.timestamps.append(frame.timestamp)
self.writer.write_video_frame(frame)
self.frame_count += 1
# # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))
self.button.status_text = self.get_rec_time_str()
def stop(self):
# explicit release of VideoWriter
self.writer.release()
self.writer = None
save_object(self.data, os.path.join(self.rec_path, "pupil_data"))
timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
# ts = sanitize_timestamps(np.array(self.timestamps))
ts = np.array(self.timestamps)
np.save(timestamps_path, ts)
try:
copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
os.path.join(self.rec_path, "surface_definitions"))
except:
logger.info("No surface_definitions data found. You may want this if you do marker tracking.")
camera_calibration = load_camera_calibration(self.g_pool)
if camera_calibration is not None:
save_object(camera_calibration, os.path.join(self.rec_path, "camera_calibration"))
else:
logger.info("No camera calibration found.")
try:
with open(self.meta_info_path, 'a', newline='') as csvfile:
csv_utils.write_key_value_file(csvfile, {
'Duration Time': self.get_rec_time_str(),
'World Camera Frames': self.frame_count,
'World Camera Resolution': str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1]),
'Capture Software Version': self.g_pool.version,
'Data Format Version': self.g_pool.version,
'System Info': get_system_info()
}, append=True)
except Exception:
logger.exception("Could not save metadata. Please report this bug!")
try:
with open(os.path.join(self.rec_path, "user_info.csv"), 'w', newline='') as csvfile:
csv_utils.write_key_value_file(csvfile, self.user_info)
except Exception:
logger.exception("Could not save userdata. Please report this bug!")
self.close_info_menu()
if self.audio_writer:
self.audio_writer = None
self.running = False
self.menu.read_only = False
self.button.status_text = ''
self.timestamps = []
self.data = {'pupil_positions': [], 'gaze_positions': []}
self.pupil_pos_list = []
self.gaze_pos_list = []
logger.info("Saved Recording.")
self.notify_all({'subject': 'recording.stopped', 'rec_path': self.rec_path})
def cleanup(self):
"""gets called when the plugin get terminated.
either volunatily or forced.
"""
if self.running:
self.stop()
self.deinit_gui()
def verify_path(self, val):
try:
n_path = os.path.expanduser(val)
logger.debug("Expanded user path.")
except:
n_path = val
if not n_path:
logger.warning("Please specify a path.")
return False
elif not os.path.isdir(n_path):
logger.warning("This is not a valid path.")
return False
# elif not os.access(n_path, os.W_OK):
elif not writable_dir(n_path):
logger.warning("Do not have write access to '{}'.".format(n_path))
return False
else:
return n_path
def set_rec_dir(self, val):
n_path = self.verify_path(val)
if n_path:
self.rec_dir = n_path
def set_session_name(self, val):
if not val:
self.session_name = get_auto_name()
else:
if os.path.sep in val:
                logger.warning('Your session name will create one or more subdirectories')
self.session_name = val
def writable_dir(n_path):
    try:
        with open(os.path.join(n_path, 'dummy_tmp'), 'w'):
            pass
    except IOError:
        return False
    else:
        os.remove(os.path.join(n_path, 'dummy_tmp'))
        return True
| lgpl-3.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.1.54/external/blktrace/btt/btt_plot.py | 43 | 11282 | #! /usr/bin/env python
#
# btt_plot.py: Generate matplotlib plots for BTT generate data files
#
# (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
btt_plot.py: Generate matplotlib plots for BTT generated data files
Files handled:
AQD - Average Queue Depth Running average of queue depths
BNOS - Block numbers accessed Markers for each block
Q2D - Queue to Issue latencies Running averages
D2C - Issue to Complete latencies Running averages
Q2C - Queue to Complete latencies Running averages
Usage:
btt_plot_aqd.py equivalent to: btt_plot.py -t aqd <type>=aqd
btt_plot_bnos.py equivalent to: btt_plot.py -t bnos <type>=bnos
btt_plot_q2d.py equivalent to: btt_plot.py -t q2d <type>=q2d
btt_plot_d2c.py equivalent to: btt_plot.py -t d2c <type>=d2c
btt_plot_q2c.py equivalent to: btt_plot.py -t q2c <type>=q2c
Arguments:
[ -A | --generate-all ] Default: False
[ -L | --no-legend ] Default: Legend table produced
[ -o <file> | --output=<file> ] Default: <type>.png
[ -T <string> | --title=<string> ] Default: Based upon <type>
[ -v | --verbose ] Default: False
<data-files...>
The -A (--generate-all) argument is different: when this is specified,
an attempt is made to generate default plots for all 5 types (aqd, bnos,
q2d, d2c and q2c). It will find files with the appropriate suffix for
each type ('aqd.dat' for example). If such files are found, a plot for
that type will be made. The output file name will be the default for
each type. The -L (--no-legend) option will be obeyed for all plots,
but the -o (--output) and -T (--title) options will be ignored.
"""
__author__ = 'Alan D. Brunelle <[email protected]>'
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import getopt, glob, os, sys
import matplotlib.pyplot as plt
plot_size = [10.9, 8.4] # inches...
add_legend = True
generate_all = False
output_file = None
title_str = None
type = None
verbose = False
types = [ 'aqd', 'q2d', 'd2c', 'q2c', 'bnos' ]
progs = [ 'btt_plot_%s.py' % t for t in types ]
get_base = lambda file: file[file.find('_')+1:file.rfind('_')]
#------------------------------------------------------------------------------
def fatal(msg):
"""Generate fatal error message and exit"""
print >>sys.stderr, 'FATAL: %s' % msg
sys.exit(1)
#----------------------------------------------------------------------
def get_data(files):
"""Retrieve data from files provided.
Returns a database containing:
'min_x', 'max_x' - Minimum and maximum X values found
'min_y', 'max_y' - Minimum and maximum Y values found
'x', 'y' - X & Y value arrays
'ax', 'ay' - Running average over X & Y --
if > 10 values provided...
"""
#--------------------------------------------------------------
def check(mn, mx, v):
"""Returns new min, max, and float value for those passed in"""
v = float(v)
if mn == None or v < mn: mn = v
if mx == None or v > mx: mx = v
return mn, mx, v
#--------------------------------------------------------------
def avg(xs, ys):
"""Computes running average for Xs and Ys"""
#------------------------------------------------------
def _avg(vals):
"""Computes average for array of values passed"""
total = 0.0
for val in vals:
total += val
return total / len(vals)
#------------------------------------------------------
if len(xs) < 1000:
return xs, ys
axs = [xs[0]]
ays = [ys[0]]
_xs = [xs[0]]
_ys = [ys[0]]
x_range = (xs[-1] - xs[0]) / 100
for idx in range(1, len(ys)):
if (xs[idx] - _xs[0]) > x_range:
axs.append(_avg(_xs))
ays.append(_avg(_ys))
del _xs, _ys
_xs = [xs[idx]]
_ys = [ys[idx]]
else:
_xs.append(xs[idx])
_ys.append(ys[idx])
if len(_xs) > 1:
axs.append(_avg(_xs))
ays.append(_avg(_ys))
return axs, ays
#--------------------------------------------------------------
global verbose
db = {}
min_x = max_x = min_y = max_y = None
for file in files:
if not os.path.exists(file):
fatal('%s not found' % file)
elif verbose:
print 'Processing %s' % file
xs = []
ys = []
for line in open(file, 'r'):
f = line.rstrip().split(None)
if line.find('#') == 0 or len(f) < 2:
continue
(min_x, max_x, x) = check(min_x, max_x, f[0])
(min_y, max_y, y) = check(min_y, max_y, f[1])
xs.append(x)
ys.append(y)
db[file] = {'x':xs, 'y':ys}
if len(xs) > 10:
db[file]['ax'], db[file]['ay'] = avg(xs, ys)
else:
db[file]['ax'] = db[file]['ay'] = None
db['min_x'] = min_x
db['max_x'] = max_x
db['min_y'] = min_y
db['max_y'] = max_y
return db
#----------------------------------------------------------------------
def parse_args(args):
"""Parse command line arguments.
Returns list of (data) files that need to be processed -- /unless/
the -A (--generate-all) option is passed, in which case superfluous
data files are ignored...
"""
global add_legend, output_file, title_str, type, verbose
global generate_all
prog = args[0][args[0].rfind('/')+1:]
if prog == 'btt_plot.py':
pass
elif not prog in progs:
fatal('%s not a valid command name' % prog)
else:
type = prog[prog.rfind('_')+1:prog.rfind('.py')]
s_opts = 'ALo:t:T:v'
l_opts = [ 'generate-all', 'type', 'no-legend', 'output', 'title',
'verbose' ]
try:
(opts, args) = getopt.getopt(args[1:], s_opts, l_opts)
except getopt.error, msg:
print >>sys.stderr, msg
fatal(__doc__)
for (o, a) in opts:
if o in ('-A', '--generate-all'):
generate_all = True
elif o in ('-L', '--no-legend'):
add_legend = False
elif o in ('-o', '--output'):
output_file = a
elif o in ('-t', '--type'):
if not a in types:
fatal('Type %s not supported' % a)
type = a
elif o in ('-T', '--title'):
title_str = a
elif o in ('-v', '--verbose'):
verbose = True
if type == None and not generate_all:
fatal('Need type of data files to process - (-t <type>)')
return args
#------------------------------------------------------------------------------
def gen_title(fig, type, title_str):
"""Sets the title for the figure based upon the type /or/ user title"""
if title_str != None:
pass
elif type == 'aqd':
title_str = 'Average Queue Depth'
elif type == 'bnos':
title_str = 'Block Numbers Accessed'
elif type == 'q2d':
title_str = 'Queue (Q) To Issue (D) Average Latencies'
elif type == 'd2c':
title_str = 'Issue (D) To Complete (C) Average Latencies'
elif type == 'q2c':
title_str = 'Queue (Q) To Complete (C) Average Latencies'
title = fig.text(.5, .95, title_str, horizontalalignment='center')
title.set_fontsize('large')
#------------------------------------------------------------------------------
def gen_labels(db, ax, type):
"""Generate X & Y 'axis'"""
#----------------------------------------------------------------------
def gen_ylabel(ax, type):
"""Set the Y axis label based upon the type"""
if type == 'aqd':
str = 'Number of Requests Queued'
elif type == 'bnos':
str = 'Block Number'
else:
str = 'Seconds'
ax.set_ylabel(str)
#----------------------------------------------------------------------
xdelta = 0.1 * (db['max_x'] - db['min_x'])
ydelta = 0.1 * (db['max_y'] - db['min_y'])
ax.set_xlim(db['min_x'] - xdelta, db['max_x'] + xdelta)
ax.set_ylim(db['min_y'] - ydelta, db['max_y'] + ydelta)
ax.set_xlabel('Runtime (seconds)')
ax.grid(True)
gen_ylabel(ax, type)
#------------------------------------------------------------------------------
def generate_output(type, db):
"""Generate the output plot based upon the type and database"""
#----------------------------------------------------------------------
def color(idx, style):
"""Returns a color/symbol type based upon the index passed."""
colors = [ 'b', 'g', 'r', 'c', 'm', 'y', 'k' ]
l_styles = [ '-', ':', '--', '-.' ]
m_styles = [ 'o', '+', '.', ',', 's', 'v', 'x', '<', '>' ]
color = colors[idx % len(colors)]
if style == 'line':
style = l_styles[(idx / len(l_styles)) % len(l_styles)]
elif style == 'marker':
style = m_styles[(idx / len(m_styles)) % len(m_styles)]
return '%s%s' % (color, style)
#----------------------------------------------------------------------
def gen_legends(a, legends):
leg = ax.legend(legends, 'best', shadow=True)
frame = leg.get_frame()
frame.set_facecolor('0.80')
for t in leg.get_texts():
t.set_fontsize('xx-small')
#----------------------------------------------------------------------
global add_legend, output_file, title_str, verbose
if output_file != None:
ofile = output_file
else:
ofile = '%s.png' % type
if verbose:
print 'Generating plot into %s' % ofile
fig = plt.figure(figsize=plot_size)
ax = fig.add_subplot(111)
gen_title(fig, type, title_str)
gen_labels(db, ax, type)
idx = 0
if add_legend:
legends = []
else:
legends = None
keys = []
for file in db.iterkeys():
if not file in ['min_x', 'max_x', 'min_y', 'max_y']:
keys.append(file)
keys.sort()
for file in keys:
dat = db[file]
if type == 'bnos':
ax.plot(dat['x'], dat['y'], color(idx, 'marker'),
markersize=1)
elif dat['ax'] == None:
continue # Don't add legend
else:
ax.plot(dat['ax'], dat['ay'], color(idx, 'line'),
linewidth=1.0)
if add_legend:
legends.append(get_base(file))
idx += 1
if add_legend and len(legends) > 0:
gen_legends(ax, legends)
plt.savefig(ofile)
#------------------------------------------------------------------------------
def get_files(type):
"""Returns the list of files for the -A option based upon type"""
if type == 'bnos':
files = []
for fn in glob.glob('*c.dat'):
for t in [ 'q2q', 'd2d', 'q2c', 'd2c' ]:
if fn.find(t) >= 0:
break
else:
files.append(fn)
else:
files = glob.glob('*%s.dat' % type)
return files
#------------------------------------------------------------------------------
if __name__ == '__main__':
files = parse_args(sys.argv)
if generate_all:
output_file = title_str = type = None
for t in types:
files = get_files(t)
if len(files) == 0:
continue
elif t != 'bnos':
generate_output(t, get_data(files))
continue
for file in files:
base = get_base(file)
title_str = 'Block Numbers Accessed: %s' % base
output_file = 'bnos_%s.png' % base
generate_output(t, get_data([file]))
elif len(files) < 1:
fatal('Need data files to process')
else:
generate_output(type, get_data(files))
sys.exit(0)
| gpl-2.0 |
dstein64/PyFactorizationMachines | example.py | 1 | 3414 | from __future__ import print_function
import numpy as np
from scipy import sparse
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import pyfms
import pyfms.regularizers
# This shows examples of how to use PyFactorizationMachines. The datasets may not be
# particularly suitable for using factorization machines.
print('pyfms {}'.format(pyfms.__version__))
print()
np.random.seed(0)
def error_score(y_true, y_pred):
return 1.0 - accuracy_score(y_true, y_pred)
print('*******************************************')
print('* Binary Classification Example')
print('* (with sample weighting and sparse data)')
print('*******************************************')
print()
X, y = datasets.load_boston(return_X_y=True)
# Binarize target
y = y > 30
# Columns 1 and 3 (0-indexed) are sparse.
# Slice data to the first 5 columns for a higher sparsity ratio.
X = X[:,:5]
X = sparse.csr_matrix(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Re-weight instances so that each class gets equal total weighting.
class_count_lookup = dict(zip(*np.unique(y_train, return_counts=True)))
sample_weight = np.array([1.0 / class_count_lookup[_y] for _y in y_train])
# Sparsify data
X_train = sparse.csr_matrix(X_train)
X_test = sparse.csr_matrix(X_test)
classifier_dims = X.shape[1]
fm_classifier = pyfms.Classifier(classifier_dims, k=2, X_format="csr")
fm_classifier.fit(X_train, y_train, sample_weight=sample_weight, nb_epoch=20000)
print('Factorization Machine Error: {}'.format(
error_score(y_test, fm_classifier.predict(X_test))))
logistic_regression = LogisticRegression()
logistic_regression.fit(X_train, y_train, sample_weight=sample_weight)
print('Logistic Regression Error: {}'.format(
error_score(y_test, logistic_regression.predict(X_test))))
print()
print('*******************************************')
print('* Regression Example')
print('* (with L2 Regularization and verbose output)')
print('*******************************************')
print()
X, y = datasets.load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
fm_regressor = pyfms.Regressor(X.shape[1], k=2)
reg = pyfms.regularizers.L2(0, 0, .01)
fm_regressor.fit(X_train, y_train, nb_epoch=50000, verbosity=5000, regularizer=reg)
print()
print('Factorization Machine MSE: {}'.format(
mean_squared_error(y_test, fm_regressor.predict(X_test))))
linear_regression = LinearRegression()
linear_regression.fit(X_train, y_train)
print('Linear Regression MSE: {}'.format(
mean_squared_error(y_test, linear_regression.predict(X_test))))
print()
print('*******************************************')
print('* Saving Model Example')
print('*******************************************')
print()
# Save the factorization machine classifier that was trained earlier
f = "weights.fm"
fm_classifier.save_weights(f)
print('Model saved')
print()
print('*******************************************')
print('* Loading a Saved Model Example')
print('*******************************************')
print()
del fm_classifier
fm_classifier = pyfms.models.Classifier(classifier_dims)
fm_classifier.load_weights(f)
print('Model loaded')
print()
| mit |
ctyeong/unitytool | ReadResult.py | 2 | 2095 | import copy
import math
import pylab
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
o = open("pathresults.xml")
paths = [[] for _ in range(2)]  # two independent lists ([[]]*2 would alias the same list)
r = dict()
p = -1
t = 0
w = 0
for i, l in enumerate(o.readlines()):
if "name=" in l and "Path" in l:
p+=1
t=0
w = 0
if "time=" in l:
l=l.replace("<results time=\"","")
l=l.replace("\">","")
l=l.replace("\n","")
t+=1
r["Time"] = float(l)
if("Crazyness" in l):
#clear
l=l.replace("<metric name=\"Crazyness\">","")
l=l.replace("</metric>","")
l=l.replace("\n","")
r["Crazyness"] = float(l)
if("Danger3\">" in l):
#clear
l=l.replace("<metric name=\"Danger3\">","")
l=l.replace("</metric>","")
l=l.replace("\n","")
r["Danger3"] = float(l)
if("Los3\">" in l):
#clear
l=l.replace("<metric name=\"Los3\">","")
l=l.replace("</metric>","")
l=l.replace("\n","")
r["Los3"] = float(l)
if("</results>" in l):
#print w
w+=1
#print p
paths[p].append(copy.deepcopy(r))
#Plot the results
x = [] ;yD = []; yL = [];yC=[]
for i,j in enumerate(paths[0]):
#print i
x.append(j["Time"])
yD.append(j["Danger3"])
yL.append(j["Los3"])
yC.append(j["Crazyness"])
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 70
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlabel("Time")
host.set_ylabel("Danger")
par1.set_ylabel("Crazyness")
par2.set_ylabel("LOS")
p1, = host.plot(x,yD, label="Danger")
p2, = par1.plot(x,yC, label="Crazy")
p3, = par2.plot(x,yL, label="LOS")
#host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
#plt.show()
#nameFile = nameFile.replace(".xml","")
plt.savefig("Output.svg", format = "svg") | mit |
nguy/AWOT | awot/util/write_kmz.py | 1 | 17468 | """
awot.util.write_kmz
========================
Functions to save AWOT data into KML/KMZ files. These files may be displayed
for example with Google Earth.
Some code was directly adapted from the NASA PyAMPR package by Timothy Lang.
https://github.com/nasa/PyAMPR/blob/master/pyampr/pyampr.py
This present method is proof of concept and is expected to expand over time.
"""
from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from datetime import datetime
from ..graph import common as gcommon
from . import helper
from .google_earth_tools import gearth_fig, make_kml
try:
import simplekml
except ImportError:
    raise ImportError("This module requires installation of simplekml...")
def write_track_kmz(awot, field, lat_name=None, lon_name=None,
time_name=None, start_time=None, end_time=None,
latrange=None, lonrange=None,
cmap=None, track_color='k', track_lw=2.5,
file_path=None, file_name=None,
show_legend=True, legend_label=None):
"""
This method plots geolocated AWOT track data as a filled color Google Earth
kmz.
Will produce overlay.png and, if a legend is created,
legend.png as temporary image files in the current working
directory.
Parameters
----------
awot : dict
AWOT flight data instance.
field : str
Name of variable to use for track data.
lat_name : str
Key in radar instance for latitude variable.
None uses AWOT default.
lon_name : str
Key in radar instance for longitude variable.
None uses AWOT default.
time_name : str
Key in radar instance for time variable.
None uses AWOT default.
start_time : str
UTC time to use as start time for subsetting in datetime format.
(e.g. 2014-08-20 12:30:00)
end_time : str
UTC time to use as an end time for subsetting in datetime format.
(e.g. 2014-08-20 16:30:00)
latrange : 2-tuple
List with lat range defined.
lonrange : 2-tuple
List with lon range defined.
cmap : Matplotlib colormap instance
Colormap desired.
See http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
and dir(cm) for more.
track_color : str
If color_by_altitude False, this color (see matplotlib)
is used for track.
track_lw : float or int
Line width to use for track.
file_path : str
Path to kmz file. Defaults to current working directory.
file_name : str
        Desired file name. If None is specified, AWOT attempts
to build using dictionary information.
show_legend : bool
False to suppress the color bar.
legend_label : str
Label to display in legend. If None, AWOT attempts to build
using field dictionary information.
"""
plt.close() # mpl seems buggy if multiple windows are left open
_method_printout()
print('Writing AWOT track KMZ file:')
# Check to see if field exists
gcommon._check_field(awot, field)
# Get the field dictionary
var = gcommon._get_variable_dict(awot, field)
# Get the lat/lon/time dictionaries
if lat_name is None:
latdict = gcommon._get_variable_dict(awot, 'latitude')
else:
latdict = gcommon._get_variable_dict(awot, lat_name)
if lon_name is None:
londict = gcommon._get_variable_dict(awot, 'longitude')
else:
londict = gcommon._get_variable_dict(awot, lon_name)
if time_name is None:
timedict = gcommon._get_variable_dict(awot, 'time')
else:
timedict = gcommon._get_variable_dict(awot, time_name)
dt_start = gcommon._get_start_datetime(timedict, start_time)
dt_end = gcommon._get_start_datetime(timedict, end_time)
datasub = helper.time_subset_awot_dict(timedict, var,
start_time, end_time)
lonsub = helper.time_subset_awot_dict(timedict, londict,
start_time, end_time)
latsub = helper.time_subset_awot_dict(timedict, latdict,
start_time, end_time)
timesub = helper.time_subset_awot_dict(timedict, timedict,
start_time, end_time)
# Filter any bad geolocation data
latd, lond, data, time = _filter_bad_geolocations(
latsub['data'], lonsub['data'], timesub['data'], datasub['data'])
# Get the lat/lon range
latrange, lonrange = _get_latrange_lonrange(
latd, lond, latrange, lonrange)
# Set the beginning and ending times
times = [dt_start, dt_end]
# Set file info
if file_path is None:
file_path = os.getcwd()
if file_name is None:
file_name = ('awot_' + awot['platform'] + '_' + awot['flight_number'] +
'_' + field + '.kmz')
longname = os.path.join(file_path, file_name)
# Google Earth image production
# If no cmap is specified, grab current
if cmap is None:
cmap = plt.get_cmap()
print(lonrange)
print(latrange)
fig, ax = gearth_fig(np.min(lonrange), np.min(latrange),
np.max(lonrange), np.max(latrange))
cs = ax.plot(lond, latd, color=track_color, lw=track_lw)
ax.set_axis_off()
fig.savefig('overlay.png', transparent=True, format='png')
# Now we convert to KMZ
if show_legend is True:
fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
cb = fig.colorbar(cs, cax=ax)
cbytick_obj = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cbytick_obj, color='w', weight='bold')
if legend_label is None:
ptitle = var['standard_name'] + ' (' + var['units'] + ')'
else:
ptitle = legend_label
cb.set_label(ptitle, rotation=-90, color='w', labelpad=20,
weight='bold')
fig.savefig('legend.png', transparent=True, format='png')
make_kml(np.min(lonrange), np.min(latrange), np.max(lonrange),
np.max(latrange), figs=['overlay.png'],
kmzfile=longname, colorbar='legend.png',
times=times)
os.remove('overlay.png')
os.remove('legend.png')
else:
make_kml(np.min(lonrange), np.min(latrange), np.max(lonrange),
np.max(latrange), figs=['overlay.png'],
kmzfile=longname, times=times)
os.remove('overlay.png')
print('Google Earth image saved to: %s' % longname)
_method_printout()
return
def write_line_kml(awot, field, lat_name=None, lon_name=None,
time_name=None, start_time=None, end_time=None,
latrange=None, lonrange=None,
cmap=None, color=None, lw=None,
file_path=None, file_name=None,
line_name=None, join_to_ground=False):
"""
Write a field of AWOT track data as a KML file for Google Earth.
Parameters
----------
awot : dict
AWOT flight data instance.
field : str
Name of variable to use for track data.
lat_name : str
Key in radar instance for latitude variable.
None uses AWOT default.
lon_name : str
Key in radar instance for longitude variable.
None uses AWOT default.
time_name : str
Key in radar instance for time variable.
None uses AWOT default.
start_time : str
UTC time to use as start time for subsetting in datetime format.
(e.g. 2014-08-20 12:30:00)
end_time : str
UTC time to use as an end time for subsetting in datetime format.
(e.g. 2014-08-20 16:30:00)
latrange : 2-tuple
List with lat range defined.
lonrange : 2-tuple
List with lon range defined.
cmap : Matplotlib colormap instance
Colormap desired.
See http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps
and dir(cm) for more.
color : str
Hex value color string to use for track.
lw : float
Line width to use for track.
file_path : str
Path to kmz file. Defaults to current working directory.
file_name : str
        Desired file name. If None is specified, AWOT attempts
to build using dictionary information.
line_name : str
Name of line string to create.
join_to_ground : bool
True to create a line that is joined to the ground. False does not.
"""
plt.close() # mpl seems buggy if multiple windows are left open
_method_printout()
print('Writing AWOT track KML file:')
# Check to see if field exists
gcommon._check_field(awot, field)
# Get the field dictionary
var = gcommon._get_variable_dict(awot, field)
# Get the lat/lon/time dictionaries
if lat_name is None:
latdict = gcommon._get_variable_dict(awot, 'latitude')
else:
latdict = gcommon._get_variable_dict(awot, lat_name)
if lon_name is None:
londict = gcommon._get_variable_dict(awot, 'longitude')
else:
londict = gcommon._get_variable_dict(awot, lon_name)
if time_name is None:
timedict = gcommon._get_variable_dict(awot, 'time')
else:
timedict = gcommon._get_variable_dict(awot, time_name)
dt_start = gcommon._get_start_datetime(timedict, start_time)
dt_end = gcommon._get_start_datetime(timedict, end_time)
datasub = helper.time_subset_awot_dict(timedict, var,
start_time, end_time)
lonsub = helper.time_subset_awot_dict(timedict, londict,
start_time, end_time)
latsub = helper.time_subset_awot_dict(timedict, latdict,
start_time, end_time)
timesub = helper.time_subset_awot_dict(timedict, timedict,
start_time, end_time)
# Filter any bad geolocation data
latd, lond, data, time = _filter_bad_geolocations(
latsub['data'], lonsub['data'], timesub['data'], datasub['data'])
# Get the lat/lon range
latrange, lonrange = _get_latrange_lonrange(
latd, lond, latrange, lonrange)
# Set the beginning and ending times
times = [dt_start, dt_end]
# Set file info
if file_path is None:
file_path = os.getcwd()
if file_name is None:
file_name = ('awot_' + awot['platform'] + '_' + awot['flight_number'] +
'_' + field + '.kml')
longname = os.path.join(file_path, file_name)
if line_name is None:
line_name = field
# Now we convert to KMZ
kml = simplekml.Kml()
linestr = kml.newlinestring(name=line_name)
linestr.coords = zip(lond, latd)
# Set properties according to keywords
if lw is not None:
linestr.style.linestyle.width = lw
if color is not None:
linestr.style.linestyle.color = color
if join_to_ground:
linestr.altitudemode = simplekml.AltitudeMode.relativetoground
linestr.extrude = 1
# Save the file
kml.save(longname)
print('KML file saved to: %s' % longname)
_method_printout()
return
def write_poly_kml(name="Polygon", innerboundary=None, outerboundary=None,
lw=4, color=None, fill=False, extrude=False,
join_to_ground=False,
timestamp=False, outputdir=None, filename=None):
'''
Write a polygon KML file for Google Earth.
Parameters
----------
name : str
Name to assign KML polygon instance.
innerboundary : tuple or array
Longitude/latitude coordinate pairs of inner boundary.
outerboundary : tuple or array
Longitude/latitude coordinate pairs of outer boundary.
lw : int
Width of line connecting polygon points.
color : str
        Hex string color of line connecting polygon points.
fill : bool
True to fill in polygon, False (default) for no fill.
extrude : bool
        True to connect line to ground, False (default) does not.
join_to_ground : bool
True to create a line that is joined to the ground. False does not.
timestamp : bool
True adds UTC time at processing.
outputdir : str
Directory to save KML file. Defaults to current working directory.
filename : str
        KML filename to save. Defaults to polygon.kml
'''
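    # Example call (sketch; the lon/lat pairs below are made up):
    #   write_poly_kml(name='Survey box',
    #                  outerboundary=[(-98.0, 35.0), (-97.0, 35.0),
    #                                 (-97.0, 36.0), (-98.0, 36.0),
    #                                 (-98.0, 35.0)],
    #                  lw=2, filename='survey_box')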
# Create a kml instance
kml = simplekml.Kml()
# Instantiate a polygon instance
pol = kml.newpolygon(name=name)
# Set the boundaries
if outerboundary is not None:
pol.outerboundaryis = outerboundary
if innerboundary is not None:
pol.innerboundaryis = innerboundary
if fill:
pol.polystyle.fill = 1
else:
pol.polystyle.fill = 0
if extrude:
pol.extrude = 1
else:
pol.extrude = 0
if join_to_ground:
pol.altitudemode = simplekml.AltitudeMode.relativetoground
pol.extrude = 1
# Set styling properties according to keywords
if lw is not None:
pol.linestyle.width = lw
if color is not None:
pol.linestyle.color = simplekml.Color.hex(color)
if timestamp:
        pol.timestamp.when = datetime.strftime(datetime.utcnow(),
                                               '%Y-%m-%dT%H:%M:%SZ')
# Save the file
if outputdir is None:
outputdir = os.getcwd()
if filename is None:
filename = "polygon"
outfname = os.path.join(outputdir, filename + ".kml")
kml.save(outfname)
return
def write_kmz(fig, ax, plot, lonrange, latrange, times,
file_path=None, file_name=None,
show_legend=True, legend_label=None):
"""
This method plots geolocated AWOT track data as a filled color Google Earth
kmz.
Will produce overlay.png and, if a legend is created,
legend.png as temporary image files in the current working
directory.
Parameters
----------
latrange : 2-tuple
List with lat range defined.
lonrange : 2-tuple
List with lon range defined.
file_path : str
Path to kmz file. Defaults to current working directory.
file_name : str
        Desired file name. If None is specified, AWOT attempts
to build using dictionary information.
show_legend : bool
False to suppress the color bar.
legend_label : str
Label to display in legend. If None, AWOT attempts to build
using field dictionary information.
"""
plt.close() # mpl seems buggy if multiple windows are left open
_method_printout()
print('Writing AWOT track KMZ file:')
longname = os.path.join(file_path, file_name)
print(lonrange)
print(latrange)
ax.set_axis_off()
fig.savefig('overlay.png', transparent=True, format='png')
# Now we convert to KMZ
if show_legend is True:
fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
cb = fig.colorbar(plot, cax=ax)
cbytick_obj = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cbytick_obj, color='w', weight='bold')
cb.set_label(legend_label, rotation=-90, color='w', labelpad=20,
weight='bold')
fig.savefig('legend.png', transparent=True, format='png')
make_kml(np.min(lonrange), np.min(latrange), np.max(lonrange),
np.max(latrange), figs=['overlay.png'],
kmzfile=longname, colorbar='legend.png',
times=times)
os.remove('overlay.png')
os.remove('legend.png')
else:
make_kml(np.min(lonrange), np.min(latrange), np.max(lonrange),
np.max(latrange), figs=['overlay.png'],
kmzfile=longname, times=times)
os.remove('overlay.png')
print('Google Earth image saved to: %s' % longname)
_method_printout()
return
###################
# Get methods #
###################
def _get_latrange_lonrange(lats=None, lons=None, latrange=None, lonrange=None):
""" Determine domain of plot based on what user provided. """
if latrange is None:
latrange = [np.min(lats), np.max(lats)]
else:
latrange = np.sort(latrange)
if lonrange is None:
lonrange = [np.min(lons), np.max(lons)]
else:
lonrange = np.sort(lonrange)
return latrange, lonrange
####################
# Data methods #
####################
def _filter_bad_geolocations(lats, lons, data, time):
""" Internal method to filter bad geolocation data. """
# Attempt to deal with bad geolocation data
# (e.g., Latitude/Longitude=bad_data)
cond1 = np.logical_or(lats < -90, lats > 90)
cond2 = np.logical_or(lons < -180, lons > 180)
condition = np.logical_or(cond1, cond2)
indices = np.where(condition)
if np.shape(indices)[1] > 0:
print("Removing bad data")
data = np.delete(data, indices[0], axis=0)
lons = np.delete(lons, indices[0], axis=0)
lats = np.delete(lats, indices[0], axis=0)
time = np.delete(time, indices[0], axis=0)
return lats, lons, data, time
######################
# Helper methods #
######################
def _method_printout():
""" Helps clarify text output. """
print('\n********************\n')
print('')
| gpl-2.0 |
deepakantony/sms-tools | lectures/05-Sinusoidal-model/plots-code/synthesis-window.py | 22 | 1725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
from scipy.fftpack import fft, ifft, fftshift
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
M = 601
w = np.blackman(M)
N = 1024
hN = N//2
Ns = 512
hNs = Ns//2
H = Ns//4
pin = 5000
t = -70
x1 = x[pin:pin+w.size]
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
freqs = iploc*fs/N
Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs)
mY = 20*np.log10(abs(Y[:hNs]))
pY = np.unwrap(np.angle(Y[:hNs]))
y= fftshift(ifft(Y))*sum(blackmanharris(Ns))
sw = np.zeros(Ns)
ow = triang(2*H);
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns)
bh = bh / sum(bh)
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H]
plt.figure(1, figsize=(9, 6))
plt.subplot(3,1,1)
plt.plot(np.arange(hNs), mY, 'r', lw=1.5)
plt.axis([0, hNs,-90,max(mY)+2])
plt.title("mY, Blackman-Harris, Ns = 512")
plt.subplot(3,1,2)
plt.plot(np.arange(-hNs,hNs), y, 'b', lw=1.5)
plt.plot(np.arange(-hNs,hNs), max(y)*bh/max(bh), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(y),max(y)+.1])
plt.title("y, size = Ns = 512 (Blackman-Harris window)")
yw = y * sw / max(sw)
plt.subplot(3,1,3)
plt.plot(np.arange(-hNs,hNs), yw, 'b',lw=1.5)
plt.plot(np.arange(-hNs//2,hNs//2), max(y)*ow/max(ow), 'k', alpha=.5,lw=1.5)
plt.axis([-hNs, hNs,min(yw),max(yw)+.1])
plt.title("yw = y * triangular / Blackman Harris; size = Ns/2 = 256")
plt.tight_layout()
plt.savefig('synthesis-window.png')
plt.show()
| agpl-3.0 |
ch3ll0v3k/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. They then show the effect of a bad
initialization on the classification process:
by setting n_init to only 1 (the default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays the result of using eight clusters,
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/pylab_examples/psd_demo2.py | 9 | 1445 | #This example shows the effects of some of the different PSD parameters
import numpy as np
import matplotlib.pyplot as plt
dt = np.pi / 100.
fs = 1. / dt
t = np.arange(0, 8, dt)
y = 10. * np.sin(2 * np.pi * 4 * t) + 5. * np.sin(2 * np.pi * 4.25 * t)
y = y + np.random.randn(*t.shape)
#Plot the raw time series
fig = plt.figure()
fig.subplots_adjust(hspace=0.45, wspace=0.3)
ax = fig.add_subplot(2, 1, 1)
ax.plot(t, y)
#Plot the PSD with different amounts of zero padding. This uses the entire
#time series at once
ax2 = fig.add_subplot(2, 3, 4)
ax2.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*2, Fs=fs)
ax2.psd(y, NFFT=len(t), pad_to=len(t)*4, Fs=fs)
plt.title('zero padding')
#Plot the PSD with different block sizes, zero-padded to the length of the
#original data sequence.
ax3 = fig.add_subplot(2, 3, 5, sharex=ax2, sharey=ax2)
ax3.psd(y, NFFT=len(t), pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//2, pad_to=len(t), Fs=fs)
ax3.psd(y, NFFT=len(t)//4, pad_to=len(t), Fs=fs)
ax3.set_ylabel('')
plt.title('block size')
#Plot the PSD with different amounts of overlap between blocks
ax4 = fig.add_subplot(2, 3, 6, sharex=ax2, sharey=ax2)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=0, Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.05*len(t)/2.), Fs=fs)
ax4.psd(y, NFFT=len(t)//2, pad_to=len(t), noverlap=int(0.2*len(t)/2.), Fs=fs)
ax4.set_ylabel('')
plt.title('overlap')
plt.show()
| unlicense |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/scipy/signal/_arraytools.py | 28 | 7553 | """
Functions for acting on an axis of an array.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'.
Parameters
----------
a : numpy.ndarray
The array to be sliced.
start, stop, step : int or None
The slice parameters.
axis : int, optional
The axis of `a` to be sliced.
Examples
--------
    >>> from scipy.signal._arraytools import axis_slice
    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> axis_slice(a, start=0, stop=1, axis=1)
array([[1],
[4],
[7]])
>>> axis_slice(a, start=1, axis=0)
array([[4, 5, 6],
[7, 8, 9]])
Notes
-----
The keyword arguments start, stop and step are used by calling
slice(start, stop, step). This implies axis_slice() does not
    handle its arguments exactly the same way as indexing. To select
a single index k, for example, use
axis_slice(a, start=k, stop=k+1)
In this case, the length of the axis 'axis' in the result will
be 1; the trivial dimension is not removed. (Use numpy.squeeze()
to remove trivial axes.)
"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[a_slice]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`.
Returns axis_slice(a, step=-1, axis=axis).
"""
return axis_slice(a, step=-1, axis=axis)
def odd_ext(x, n, axis=-1):
"""
Odd extension at the boundaries of an array
Generate a new ndarray by making an odd extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import odd_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> odd_ext(a, 2)
array([[-1, 0, 1, 2, 3, 4, 5, 6, 7],
[-4, -1, 0, 1, 4, 9, 16, 23, 28]])
Odd extension is a "180 degree rotation" at the endpoints of the original
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = odd_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='odd extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""
Even extension at the boundaries of an array
Generate a new ndarray by making an even extension of `x` along an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import even_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> even_ext(a, 2)
array([[ 3, 2, 1, 2, 3, 4, 5, 4, 3],
[ 4, 1, 0, 1, 4, 9, 16, 9, 4]])
Even extension is a "mirror image" at the boundaries of the original array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = even_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='even extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""
Constant extension at the boundaries of an array
Generate a new ndarray that is a constant extension of `x` along an axis.
The extension repeats the values at the first and last element of
the axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import const_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> const_ext(a, 2)
array([[ 1, 1, 1, 2, 3, 4, 5, 5, 5],
[ 0, 0, 0, 1, 4, 9, 16, 16, 16]])
Constant extension continues with the same values as the endpoints of the
array:
>>> t = np.linspace(0, 1.5, 100)
>>> a = 0.9 * np.sin(2 * np.pi * t**2)
>>> b = const_ext(a, 40)
>>> import matplotlib.pyplot as plt
    >>> plt.plot(np.arange(-40, 140), b, 'b', lw=1, label='constant extension')
    >>> plt.plot(np.arange(100), a, 'r', lw=2, label='original')
>>> plt.legend(loc='best')
>>> plt.show()
"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def zero_ext(x, n, axis=-1):
"""
Zero padding at the boundaries of an array
Generate a new ndarray that is a zero padded extension of `x` along
an axis.
Parameters
----------
x : ndarray
The array to be extended.
n : int
The number of elements by which to extend `x` at each end of the
axis.
axis : int, optional
The axis along which to extend `x`. Default is -1.
Examples
--------
>>> from scipy.signal._arraytools import zero_ext
>>> a = np.array([[1, 2, 3, 4, 5], [0, 1, 4, 9, 16]])
>>> zero_ext(a, 2)
array([[ 0, 0, 1, 2, 3, 4, 5, 0, 0],
[ 0, 0, 0, 1, 4, 9, 16, 0, 0]])
"""
if n < 1:
return x
zeros_shape = list(x.shape)
zeros_shape[axis] = n
zeros = np.zeros(zeros_shape, dtype=x.dtype)
ext = np.concatenate((zeros, x, zeros), axis=axis)
return ext
| agpl-3.0 |
vkscool/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/bezier.py | 70 | 14387 | """
A module providing some utility functions regarding bezier path manipulation.
"""
import numpy as np
from math import sqrt
from matplotlib.path import Path
from operator import xor
# some functions
def get_intersection(cx1, cy1, cos_t1, sin_t1,
cx2, cy2, cos_t2, sin_t2):
""" return a intersecting point between a line through (cx1, cy1)
and having angle t1 and a line through (cx2, cy2) and angle t2.
"""
# line1 => sin_t1 * (x - cx1) - cos_t1 * (y - cy1) = 0.
# line1 => sin_t1 * x + cos_t1 * y = sin_t1*cx1 - cos_t1*cy1
line1_rhs = sin_t1 * cx1 - cos_t1 * cy1
line2_rhs = sin_t2 * cx2 - cos_t2 * cy2
# rhs matrix
a, b = sin_t1, -cos_t1
c, d = sin_t2, -cos_t2
ad_bc = a*d-b*c
if ad_bc == 0.:
raise ValueError("Given lines do not intersect")
#rhs_inverse
a_, b_ = d, -b
c_, d_ = -c, a
a_, b_, c_, d_ = [k / ad_bc for k in [a_, b_, c_, d_]]
x = a_* line1_rhs + b_ * line2_rhs
y = c_* line1_rhs + d_ * line2_rhs
return x, y
def get_normal_points(cx, cy, cos_t, sin_t, length):
"""
    For a line passing through (*cx*, *cy*) and having an angle *t*,
    return the locations of the two points located along its perpendicular line at a distance of *length*.
"""
if length == 0.:
return cx, cy, cx, cy
cos_t1, sin_t1 = sin_t, -cos_t
cos_t2, sin_t2 = -sin_t, cos_t
x1, y1 = length*cos_t1 + cx, length*sin_t1 + cy
x2, y2 = length*cos_t2 + cx, length*sin_t2 + cy
return x1, y1, x2, y2
## BEZIER routines
# subdividing bezier curve
# http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-sub.html
def _de_casteljau1(beta, t):
next_beta = beta[:-1] * (1-t) + beta[1:] * t
return next_beta
def split_de_casteljau(beta, t):
"""split a bezier segment defined by its controlpoints *beta*
into two separate segment divided at *t* and return their control points.
"""
beta = np.asarray(beta)
beta_list = [beta]
while True:
beta = _de_casteljau1(beta, t)
beta_list.append(beta)
if len(beta) == 1:
break
left_beta = [beta[0] for beta in beta_list]
right_beta = [beta[-1] for beta in reversed(beta_list)]
return left_beta, right_beta
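# --- Editorial sketch (not part of the original module) ---------------------
# A small, hedged illustration of split_de_casteljau: splitting a quadratic
# bezier segment at t=0.5; the control points below are arbitrary.
def _demo_split_de_casteljau():
    beta = [(0., 0.), (1., 2.), (2., 0.)]
    left, right = split_de_casteljau(beta, 0.5)
    # Both halves share the curve point at t=0.5, namely (1.0, 1.0).
    return left[-1], right[0]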
def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath,
t0=0., t1=1., tolerence=0.01):
""" Find a parameter t0 and t1 of the given bezier path which
bounds the intersecting points with a provided closed
path(*inside_closedpath*). Search starts from *t0* and *t1* and it
uses a simple bisecting algorithm therefore one of the end point
must be inside the path while the orther doesn't. The search stop
when |t0-t1| gets smaller than the given tolerence.
value for
- bezier_point_at_t : a function which returns x, y coordinates at *t*
- inside_closedpath : return True if the point is insed the path
"""
# inside_closedpath : function
start = bezier_point_at_t(t0)
end = bezier_point_at_t(t1)
start_inside = inside_closedpath(start)
end_inside = inside_closedpath(end)
if not xor(start_inside, end_inside):
raise ValueError("the segment does not seemed to intersect with the path")
while 1:
# return if the distance is smaller than the tolerence
if (start[0]-end[0])**2 + (start[1]-end[1])**2 < tolerence**2:
return t0, t1
# calculate the middle point
middle_t = 0.5*(t0+t1)
middle = bezier_point_at_t(middle_t)
middle_inside = inside_closedpath(middle)
if xor(start_inside, middle_inside):
t1 = middle_t
end = middle
end_inside = middle_inside
else:
t0 = middle_t
start = middle
start_inside = middle_inside
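# --- Editorial sketch (not part of the original module) ---------------------
# A hedged usage example of the bisection above: locate where the straight
# segment from (0, 0) to (2, 0) crosses the unit circle centered at the origin
# (inside_circle is defined further below in this module).
def _demo_bisect_circle_crossing():
    def point_at_t(t):
        return (2. * t, 0.)
    t0, t1 = find_bezier_t_intersecting_with_closedpath(
        point_at_t, inside_circle(0., 0., 1.), tolerence=1e-3)
    # The bracket converges around t=0.5, i.e. the point (1, 0).
    return 0.5 * (t0 + t1)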
class BezierSegment:
"""
A simple class of a 2-dimensional bezier segment
"""
    # Higher order bezier lines can be supported by simply adding
    # corresponding values.
_binom_coeff = {1:np.array([1., 1.]),
2:np.array([1., 2., 1.]),
3:np.array([1., 3., 3., 1.])}
def __init__(self, control_points):
"""
        *control_points* : location of control points. It needs to have a
        shape of n * 2, where n is the order of the bezier line. 1 <=
        n <= 3 is supported.
"""
_o = len(control_points)
self._orders = np.arange(_o)
_coeff = BezierSegment._binom_coeff[_o - 1]
_control_points = np.asarray(control_points)
xx = _control_points[:,0]
yy = _control_points[:,1]
self._px = xx * _coeff
self._py = yy * _coeff
def point_at_t(self, t):
"evaluate a point at t"
one_minus_t_powers = np.power(1.-t, self._orders)[::-1]
t_powers = np.power(t, self._orders)
tt = one_minus_t_powers * t_powers
_x = sum(tt * self._px)
_y = sum(tt * self._py)
return _x, _y
def split_bezier_intersecting_with_closedpath(bezier,
inside_closedpath,
tolerence=0.01):
"""
bezier : control points of the bezier segment
inside_closedpath : a function which returns true if the point is inside the path
"""
bz = BezierSegment(bezier)
bezier_point_at_t = bz.point_at_t
t0, t1 = find_bezier_t_intersecting_with_closedpath(bezier_point_at_t,
inside_closedpath,
tolerence=tolerence)
_left, _right = split_de_casteljau(bezier, (t0+t1)/2.)
return _left, _right
def find_r_to_boundary_of_closedpath(inside_closedpath, xy,
cos_t, sin_t,
rmin=0., rmax=1., tolerence=0.01):
"""
Find a radius r (centered at *xy*) between *rmin* and *rmax* at
which it intersect with the path.
inside_closedpath : function
cx, cy : center
cos_t, sin_t : cosine and sine for the angle
rmin, rmax :
"""
cx, cy = xy
def _f(r):
return cos_t*r + cx, sin_t*r + cy
find_bezier_t_intersecting_with_closedpath(_f, inside_closedpath,
t0=rmin, t1=rmax, tolerence=tolerence)
## matplotlib specific
def split_path_inout(path, inside, tolerence=0.01, reorder_inout=False):
""" divide a path into two segment at the point where inside(x, y)
becomes False.
"""
path_iter = path.iter_segments()
ctl_points, command = path_iter.next()
begin_inside = inside(ctl_points[-2:]) # true if begin point is inside
bezier_path = None
ctl_points_old = ctl_points
concat = np.concatenate
iold=0
i = 1
for ctl_points, command in path_iter:
iold=i
i += len(ctl_points)/2
if inside(ctl_points[-2:]) != begin_inside:
bezier_path = concat([ctl_points_old[-2:], ctl_points])
break
ctl_points_old = ctl_points
if bezier_path is None:
raise ValueError("The path does not seem to intersect with the patch")
bp = zip(bezier_path[::2], bezier_path[1::2])
left, right = split_bezier_intersecting_with_closedpath(bp,
inside,
tolerence)
if len(left) == 2:
codes_left = [Path.LINETO]
codes_right = [Path.MOVETO, Path.LINETO]
elif len(left) == 3:
codes_left = [Path.CURVE3, Path.CURVE3]
codes_right = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
elif len(left) == 4:
codes_left = [Path.CURVE4, Path.CURVE4, Path.CURVE4]
codes_right = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
else:
raise ValueError()
verts_left = left[1:]
verts_right = right[:]
#i += 1
if path.codes is None:
path_in = Path(concat([path.vertices[:i], verts_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]))
else:
path_in = Path(concat([path.vertices[:iold], verts_left]),
concat([path.codes[:iold], codes_left]))
path_out = Path(concat([verts_right, path.vertices[i:]]),
concat([codes_right, path.codes[i:]]))
if reorder_inout and begin_inside == False:
path_in, path_out = path_out, path_in
return path_in, path_out
def inside_circle(cx, cy, r):
r2 = r**2
def _f(xy):
x, y = xy
return (x-cx)**2 + (y-cy)**2 < r2
return _f
# quadratic bezier lines
def get_cos_sin(x0, y0, x1, y1):
dx, dy = x1-x0, y1-y0
d = (dx*dx + dy*dy)**.5
return dx/d, dy/d
def get_parallels(bezier2, width):
"""
    Given the quadratic bezier control points *bezier2*, returns
    control points of quadratic bezier lines roughly parallel to the given
    one, separated by *width*.
"""
    # The parallel bezier lines are constructed in the following way.
    #  c1 and c2 are control points representing the beginning and end of the bezier line.
# cm is the middle point
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c2x, c2y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c2.
    # They are also the angles of the tangent lines of the path at c1 and c2
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c2x, c2y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangent lines of the
# bezier path at a distance of width. Same thing for c2_left and
# c2_right with respect to c2.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width)
c2x_left, c2y_left, c2x_right, c2y_right = \
get_normal_points(c2x, c2y, cos_t2, sin_t2, width)
    # find cm_left which is the intersection point of a line through
    # c1_left with angle t1 and a line through c2_left with angle
# t2. Same with cm_right.
cmx_left, cmy_left = get_intersection(c1x_left, c1y_left, cos_t1, sin_t1,
c2x_left, c2y_left, cos_t2, sin_t2)
cmx_right, cmy_right = get_intersection(c1x_right, c1y_right, cos_t1, sin_t1,
c2x_right, c2y_right, cos_t2, sin_t2)
    # the parallel bezier lines are created with control points of
# [c1_left, cm_left, c2_left] and [c1_right, cm_right, c2_right]
path_left = [(c1x_left, c1y_left), (cmx_left, cmy_left), (c2x_left, c2y_left)]
path_right = [(c1x_right, c1y_right), (cmx_right, cmy_right), (c2x_right, c2y_right)]
return path_left, path_right
def make_wedged_bezier2(bezier2, length, shrink_factor=0.5):
"""
    Similar to get_parallels, returns
    control points of two quadratic bezier lines forming a wedge roughly parallel
    to the given one, separated by *width*.
"""
xx1, yy1 = bezier2[2]
xx2, yy2 = bezier2[1]
xx3, yy3 = bezier2[0]
cx, cy = xx3, yy3
x0, y0 = xx2, yy2
dist = sqrt((x0-cx)**2 + (y0-cy)**2)
cos_t, sin_t = (x0-cx)/dist, (y0-cy)/dist,
x1, y1, x2, y2 = get_normal_points(cx, cy, cos_t, sin_t, length)
xx12, yy12 = (xx1+xx2)/2., (yy1+yy2)/2.,
xx23, yy23 = (xx2+xx3)/2., (yy2+yy3)/2.,
dist = sqrt((xx12-xx23)**2 + (yy12-yy23)**2)
cos_t, sin_t = (xx12-xx23)/dist, (yy12-yy23)/dist,
xm1, ym1, xm2, ym2 = get_normal_points(xx2, yy2, cos_t, sin_t, length*shrink_factor)
l_plus = [(x1, y1), (xm1, ym1), (xx1, yy1)]
l_minus = [(x2, y2), (xm2, ym2), (xx1, yy1)]
return l_plus, l_minus
def find_control_points(c1x, c1y, mmx, mmy, c2x, c2y):
""" Find control points of the bezier line throught c1, mm, c2. We
simply assume that c1, mm, c2 which have parameteric value 0, 0.5, and 1.
"""
cmx = .5 * (4*mmx - (c1x + c2x))
cmy = .5 * (4*mmy - (c1y + c2y))
return [(c1x, c1y), (cmx, cmy), (c2x, c2y)]
def make_wedged_bezier2(bezier2, width, w1=1., wm=0.5, w2=0.):
"""
    Similar to get_parallels, returns
    control points of two quadratic bezier lines forming a wedge roughly parallel
    to the given one, separated by *width*.
"""
# c1, cm, c2
c1x, c1y = bezier2[0]
cmx, cmy = bezier2[1]
c3x, c3y = bezier2[2]
    # t1 and t2 are the angles between c1 and cm, and between cm and c3.
    # They are also the angles of the tangent lines of the path at c1 and c3
cos_t1, sin_t1 = get_cos_sin(c1x, c1y, cmx, cmy)
cos_t2, sin_t2 = get_cos_sin(cmx, cmy, c3x, c3y)
# find c1_left, c1_right which are located along the lines
    # through c1 and perpendicular to the tangent lines of the
# bezier path at a distance of width. Same thing for c3_left and
# c3_right with respect to c3.
c1x_left, c1y_left, c1x_right, c1y_right = \
get_normal_points(c1x, c1y, cos_t1, sin_t1, width*w1)
c3x_left, c3y_left, c3x_right, c3y_right = \
get_normal_points(c3x, c3y, cos_t2, sin_t2, width*w2)
# find c12, c23 and c123 which are middle points of c1-cm, cm-c3 and c12-c23
c12x, c12y = (c1x+cmx)*.5, (c1y+cmy)*.5
c23x, c23y = (cmx+c3x)*.5, (cmy+c3y)*.5
c123x, c123y = (c12x+c23x)*.5, (c12y+c23y)*.5
# tangential angle of c123 (angle between c12 and c23)
cos_t123, sin_t123 = get_cos_sin(c12x, c12y, c23x, c23y)
c123x_left, c123y_left, c123x_right, c123y_right = \
get_normal_points(c123x, c123y, cos_t123, sin_t123, width*wm)
path_left = find_control_points(c1x_left, c1y_left,
c123x_left, c123y_left,
c3x_left, c3y_left)
path_right = find_control_points(c1x_right, c1y_right,
c123x_right, c123y_right,
c3x_right, c3y_right)
return path_left, path_right
if 0:
path = Path([(0, 0), (1, 0), (2, 2)],
[Path.MOVETO, Path.CURVE3, Path.CURVE3])
left, right = divide_path_inout(path, inside)
clf()
ax = gca()
| gpl-3.0 |
pandyag/trading-with-python | lib/interactiveBrokers/histData.py | 76 | 6472 | '''
Created on May 8, 2013
Copyright: Jev Kuznetsov
License: BSD
Module for downloading historic data from IB
'''
import ib
import pandas as pd
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
import logger as logger
from pandas import DataFrame, Index
import os
import datetime as dt
import time
from time import sleep
from extra import timeFormat, dateFormat
class Downloader(object):
def __init__(self,debug=False):
self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pd.__version__,ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler,message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self,msg):
print '[debug]', msg
def requestData(self,contract,endDateTime,durationStr='1 D',barSizeSetting='30 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
if isinstance(endDateTime,dt.datetime): # convert to string
endDateTime = endDateTime.strftime(timeFormat)
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
self._reqId+=1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
# def getIntradayData(self,contract, dateTuple ):
# ''' get full day data on 1-s interval
# date: a tuple of (yyyy,mm,dd)
# '''
#
# openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
# closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
#
# timeRange = pd.date_range(openTime,closeTime,freq='30min')
#
# datasets = []
#
# for t in timeRange:
# datasets.append(self.requestData(contract,t.strftime(timeFormat)))
#
# return pd.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class _HistDataHandler(object):
''' handles incoming messages '''
def __init__(self,tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler,message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
def msgHandler(self,msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
            self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date,dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
''' return downloaded data as a DataFrame '''
df = DataFrame(data=self._data,index=Index(self._timestamp))
return df
class TimeKeeper(object):
'''
    class for keeping track of previous requests, to satisfy the IB requirements
    (max 60 requests / 10 min)
    each time a request is made, a timestamp is added to a txt file in the user dir.
'''
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~')+'/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir,'requests.txt'))
# Create file if it's missing
if not os.path.exists(self.dataFile):
open(self.dataFile,'w').close()
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
''' adds a timestamp of current request'''
with open(self.dataFile,'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat)+'\n')
def nrRequests(self,timeSpan=600):
''' return number of requests in past timespan (s) '''
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile,'r') as f:
lines = f.readlines()
for line in lines:
if now-dt.datetime.strptime(line.strip(),self._timeFormat) < delta:
requests+=1
if requests==0: # erase all contents if no requests are relevant
open(self.dataFile,'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
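# --- Editorial sketch (not part of the original module) ---------------------
# A hedged example of how TimeKeeper is meant to be used around requests:
# log the request, then check that we are still under IB's limit of 60
# requests per 10 minutes before sending another one.
def _demoTimeKeeper():
    tk = TimeKeeper()
    tk.addRequest()
    return tk.nrRequests(timeSpan=600) < 60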
if __name__ == '__main__':
from extra import createContract
dl = Downloader(debug=True) # historic data downloader class
contract = createContract('SPY') # create contract using defaults (STK,SMART,USD)
data = dl.requestData(contract,"20141208 16:00:00 EST") # request 30-second data bars up till now
data.to_csv('SPY.csv') # write data to csv
print 'Done' | bsd-3-clause |
WillieMaddox/scipy | tools/refguide_check.py | 3 | 27754 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!]; it is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'weave.rst', # tutorial for a deprecated module
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
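# --- Editorial sketch (not part of the original script) ----------------------
# A hedged illustration of the first refguide pattern above: it matches the
# "three spaces + object name + dashes + description" autosummary-style lines.
def _demo_refguide_entry_pattern():
    pattern = re.compile(r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$")
    return pattern.match("   linkage  -- agglomerative clustering").group(1)  # -> 'linkage'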
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
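# --- Editorial sketch (not part of the original script) ----------------------
# A hedged demonstration of the Checker logic above: two numerically close but
# textually different array reprs are accepted as matching. Setting _source by
# hand mimics what DTRunner.report_start normally does.
def _demo_checker_tolerance():
    chk = Checker()
    chk._source = ""
    return chk.check_output("array([ 1., 2.])", "array([ 1.000001, 2.])", 0)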
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
tut_path = os.path.join(os.getcwd(), 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % tut_path)
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
xuewei4d/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 17 | 6481 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set. [1]_, [2]_
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [3]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [2] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
.. [3] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
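# --- Editorial sketch (not part of the original example) ---------------------
# A hedged, self-contained mini-version of the comparison below: fit the robust
# MCD and the empirical estimator on a small contaminated Gaussian sample and
# compare the estimated locations (the robust one should stay near the origin).
def _mcd_mini_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 2)
    X[:5] += 10.  # five gross outliers
    robust_location = MinCovDet(random_state=0).fit(X).location_
    empirical_location = EmpiricalCovariance().fit(X).location_
    return robust_location, empirical_location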
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
nipunagarwala/cs224s_final_project | code/utils/mvTrainDev.py | 1 | 2719 | import os
import pickle
import shutil
import pandas as pd
from sklearn.model_selection import train_test_split
"""
One-time use.
Splits data/train/* into data/train/* and data/dev/*
at random such that transcripts do not overlap between sets,
with an 80-20 split.
As part of this, this script revises utteranceInfo.pkl
and creates utteranceInfo.pkl.
"""
with open("data/train/utteranceInfo.pkl", "rb") as f:
meta = pickle.load(f)
PROB_OF_TRAINING = 0.8
# Transcripts can be used a variable number of times
# Iterate over each transcript and the number of times it is used,
# and split it and only its instances fairly among train/dev
#
# What is the distribution of transcript usage in their training data?
#>>> meta["transcript"].value_counts().value_counts()
# 1 391
# 3 5
# 4 35
# 5 2
# 6 38
# 16 4
# 17 36
# aka 391 transcripts are used only 1 time, 5 transcripts are used 3 times, ...
transcript_uses = meta["transcript"].value_counts()
num_uses = transcript_uses.value_counts().index
train = []
dev = []
for use_count in num_uses:
transcript_values = transcript_uses[transcript_uses == use_count].index
t_transcripts, d_transcripts = train_test_split(transcript_values, train_size=PROB_OF_TRAINING, random_state=42)
train.append( meta[meta["transcript"].isin(t_transcripts)] )
dev.append( meta[meta["transcript"].isin(d_transcripts)] )
train = pd.concat(train)
dev = pd.concat(dev)
dev["split"] = "dev"
# Move around the results
directory = "data/train"
newtrain = os.path.join(directory, "newtrain")
os.mkdir(newtrain)
for i, utterance in train.iterrows():
old_pkl_filename = os.path.join(directory, utterance["label"] + ".pkl")
assert( utterance["split"] == "train" )
new_pkl_filename = os.path.join(newtrain, utterance["label"] + ".pkl")
os.rename(old_pkl_filename, new_pkl_filename)
newdev = os.path.join(directory, "newdev")
os.mkdir(newdev)
for i, utterance in dev.iterrows():
old_pkl_filename = os.path.join(directory, utterance["label"] + ".pkl")
assert( utterance["split"] == "dev" )
new_pkl_filename = os.path.join(newdev, utterance["label"] + ".pkl")
os.rename(old_pkl_filename, new_pkl_filename)
# Write out the meta info
with open(os.path.join(directory, "newtrain", "utteranceInfo.pkl"), "wb") as f:
pickle.dump(train, f)
with open(os.path.join(directory, "newdev", "utteranceInfo.pkl"), "wb") as f:
pickle.dump(dev, f)
##############################
# Move things around
##############################
os.rename("data/train/newtrain", "data/newtrain")
os.rename("data/train/newdev", "data/dev")
shutil.rmtree("data/train")
os.rename("data/newtrain", "data/train")
| mit |
linebp/pandas | pandas/errors/__init__.py | 3 | 1640 | # flake8: noqa
""" expose public exceptions & warnings """
from pandas._libs.tslib import OutOfBoundsDatetime
class PerformanceWarning(Warning):
"""
Warnings shown when there is a possible performance
impact.
"""
class UnsupportedFunctionCall(ValueError):
"""
    Raised when attempting to call a numpy function on a pandas
    object, for example using ``np.cumsum(groupby_object)``.
"""
class UnsortedIndexError(KeyError):
"""
Error raised when attempting to get a slice of a MultiIndex
and the index has not been lexsorted. Subclass of `KeyError`.
.. versionadded:: 0.20.0
"""
class ParserError(ValueError):
"""
    Exception that is raised when an error is encountered in `pd.read_csv`
"""
class DtypeWarning(Warning):
"""
    Warning that is raised for a dtype incompatibility. This can
    happen whenever `pd.read_csv` encounters non-uniform dtypes in a
    column (or columns) of a given CSV file.
"""
class EmptyDataError(ValueError):
"""
Exception that is thrown in `pd.read_csv` (by both the C and
Python engines) when empty data or header is encountered
"""
class ParserWarning(Warning):
"""
Warning that is raised in `pd.read_csv` whenever it is necessary
to change parsers (generally from 'c' to 'python') contrary to the
one specified by the user due to lack of support or functionality for
    parsing particular attributes of a CSV file with the requested engine.
"""
class MergeError(ValueError):
"""
Error raised when problems arise during merging due to problems
with input data. Subclass of `ValueError`.
"""
| bsd-3-clause |
BigBigXiong/ML_learning | logic_regression.py | 1 | 2925 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 14:59:24 2017
@author: Austin
"""
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from sklearn.preprocessing import PolynomialFeatures
import seaborn as sns
def loadData(file, delimeter):
data = np.loadtxt(file, delimiter=delimeter)
print("dims: ", data.shape)
print(data[1:6, :])
return(data)
def plotData(data, label_x, label_y, label_pos, label_neg, axes=None):
neg = data[:, 2] == 0
pos = data[:, 2] == 1
    if axes is None:
axes = plt.gca()
axes.scatter(data[pos][:, 0], data[pos][:, 1], marker='+', c='k', s=60,
linewidths=2, label=label_pos)
axes.scatter(data[neg][:,0], data[neg][:,1], c='y', s=60, label=label_neg)
axes.set_xlabel(label_x)
axes.set_ylabel(label_y)
axes.legend(frameon= True, fancybox = True)
# define the sigmoid (logistic) function
def sigmoid(z):
return(1 / (1 + np.exp(-z)))
# define the cost (loss) function
def costFunction(theta, X, y):
m = y.size
h = sigmoid(X.dot(theta))
try:
J = -1.0*(1.0/m)*(np.log(h).T.dot(y)+np.log(1-h).T.dot(1-y))
    except Exception:
return(np.inf)
if np.isnan(J[0]):
return(np.inf)
return J[0]
def gradient(theta, X, y):
m = y.size
h = sigmoid(X.dot(theta.reshape(-1,1)))
grad =(1.0/m)*X.T.dot(h-y)
return(grad.flatten())
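# For reference (added note, standard logistic-regression formulas): with
# h = sigmoid(X.theta), the two functions above implement
#   J(theta)      = -(1/m) * [ y'.log(h) + (1-y)'.log(1-h) ]
#   grad J(theta) =  (1/m) * X'.(h - y)
# i.e. the vectorised cross-entropy cost and its gradient.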
def predict(theta, X, threshold=0.5):
p = sigmoid(X.dot(theta.T)) >= threshold
return(p.astype('int'))
def main():
data = loadData("data1.txt", ",")
X = np.c_[np.ones((data.shape[0],1)), data[:,0:2]]
y = np.c_[data[:, 2]]
plotData(data, 'Exam 1 score', 'Exam 2 score', 'Pass', 'Fail')
initial_theta = np.zeros(X.shape[1])
cost = costFunction(initial_theta, X, y)
grad = gradient(initial_theta, X, y)
print('Cost: \n', cost)
print('Grad: \n', grad)
res = minimize(costFunction, initial_theta,
args=(X,y), jac=gradient, options={'maxiter':400})
score = sigmoid(np.array([1, 45, 85]).dot(res.x.T))
print(score)
plt.scatter(45, 85, s=60, c='r', marker='v', label='(45, 85)')
plotData(data, 'Exam 1 score', 'Exam 2 score', 'Admitted', 'Not admitted')
x1_min, x1_max = X[:,1].min(), X[:,1].max()
x2_min, x2_max = X[:,2].min(), X[:,2].max()
xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
h = sigmoid(np.c_[np.ones((xx1.ravel().shape[0],1)), xx1.ravel(), xx2.ravel()].dot(res.x))
h = h.reshape(xx1.shape)
plt.contour(xx1, xx2, h, [0.5], linewidths=1, colors='b');
if __name__ == '__main__':
main()
| gpl-3.0 |
wathen/PhD | MHD/FEniCS/MHD/CG/PicardIter_Direct/precondition/tests/MHD.py | 1 | 14147 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import Solver as S
import ExactSol
import P as Precond
import cProfile, pstats, StringIO
m = 4
# m = 2
IterType = "Full"
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
TotalWork = []
InnerTolerances = []
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
test = [[1e-6,1e-6],[1e-6,1e-5],[1e-6,1e-4],[1e-6,1e-3],[1e-6,1e-6,'Reduce1',2],[1e-6,1e-6,'Reduce1',5],[1e-6,1e-6,'Reduce1',10]]
# test = [[1e-6,1e-6],[1e-5,1e-5],[1e-4,1e-4],[1e-3,1e-3]]
test = [[1e-6,1e-6],[1e-6,1e-4],[1e-6,1e-2],[1e-6,1e-1]]
# test =[[1e-6,1e-1]]
MinTol = 1e-3
aa = 0
for kk in xrange(1,len(test)+1):
aa += 1
for xx in xrange(1,m):
print xx
level[xx-1] = xx+3
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
mesh = RectangleMesh(0, 0, 1, 1, nn, nn,'left')
parameters["form_compiler"]["precision"] = 15
parameters["form_compiler"]["quadrature_degree"] = -1
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcp = DirichletBC(W.sub(1),p0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 100.0
parameters['linear_algebra_backend'] = 'uBLAS'
Mu_m =1e1
MU = 1.0
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array()))
# bcu.apply(u_k)
# bcb.apply(b_k)
# bcr.apply(r_k)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType)
print CoupleTerm
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params)
bcu = DirichletBC(W.sub(0),Expression(("0","0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0","0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0")), boundary)
bcs = [bcu,bcb,bcr]
p = forms.Preconditioner(mesh,W,u_k,b_k,params,IterType)
PP,Pb = assemble_system(p, Lns,bcs)
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
if IterType == "Full" or IterType == "MD":
(pQ) = TrialFunction(Pressure)
(qQ) = TestFunction(Pressure)
print MU
Q = assemble(inner(pQ,qQ)*dx)
L = assemble(inner(grad(pQ),grad(qQ))*dx)
n = FacetNormal(mesh)
fp = MU*inner(grad(qQ), grad(pQ))*dx+inner((u_k[0]*grad(pQ)[0]+u_k[1]*grad(pQ)[1]),qQ)*dx + (1/2)*div(u_k)*inner(pQ,qQ)*dx - (1/2)*(u_k[0]*n[0]+u_k[1]*n[1])*inner(pQ,qQ)*ds
L = CP.Assemble(L)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
P = CP.Assemble(PP)
u = b.duplicate()
Mits = 0
NSits = 0
InnerTol = []
OuterTol = []
# MPI.barrier()
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 50 # max no of iterations allowed
SolutionTime = 0
outer = 0
parameters['linear_algebra_backend'] = 'uBLAS'
TotalStart = t.clock()
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
P = CP.Assemble(PP)
print b
Mass = 0
L = 0
F = 0
else:
# tic()
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
del AA
F = assemble(fp)
F = CP.Assemble(F)
P = CP.Assemble(PP)
# P = S.ExactPrecond(PP,Q,L,F,FSpaces)
Mass = CP.Assemble(Q)
# print "Assemble time >>>>>>",toc()
if iter == 1:
uu = b.duplicate()
else:
uu = uu
pr = cProfile.Profile()
start = t.clock()
pr.enable()
# if len(test[kk-1]) ==2:
OuterTol = test[kk-1][0]
InnerTol = test[kk-1][1]
print InnerTol
print OuterTol
u,it1,it2 = S.solve(A,b,uu,P,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,Mass,L,F)
# else:
# if test[kk-1] == "Reduce":
# OuterTol = test[kk-1][0]
# InnerTol.append(test[kk-1][1]*((iter)*test[kk-1][3]))
# print InnerTol
# print OuterTol
# u,it1,it2 = S.solve(A,b,uu,P,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol[iter-1],Mass,L,F)
# else:
# OuterTol = test[kk-1][0]
# if iter == 1:
# InnerTol.append(test[kk-1][1])
# else:
# print InnerTol[iter-2]
# InnerTol.append(min(MinTol,InnerTol[iter-2]*test[kk-1][3]))
# print InnerTol
# print OuterTol
# u,it1,it2 = S.solve(A,b,uu,P,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol[iter-1],Mass,L,F)
del A,P
# print InnerTol[iter-1]
pr.disable()
# time = toc()
time = (t.clock() - start)
s = StringIO.StringIO()
print "Solve time >>>>>>", time
print it1,it2
NSits += it1
Mits +=it2
SolutionTime = SolutionTime +time
# tic()
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"inf",iter)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
# print "Correction time >>>>>>", toc()
# p_k.vector()[:]= p_k.vector().array()+np.abs(np.min(p_k.vector().array()))
x = Iter.u_prev(u_k,p_k,b_k,r_k)
            # print toc()
# u_k,b_k,epsu,epsb=Direct.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
TotalTime[xx-1] = t.clock()-TotalStart
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
SolTime[xx-1] = SolutionTime/iter
ue = u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
#errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
if xx == 1:
l2uorder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
# print "\n\n Velocity convergence"
# VelocityTitles = ["l","Total DoF","V DoF","Soln Time","V-L2","L2-order","V-H1","H1-order"]
# VelocityValues = np.concatenate((level,Wdim,Velocitydim,SolTime,errL2u,l2uorder,errH1u,H1uorder),axis=1)
# VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles)
# pd.set_option('precision',3)
# VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f")
# VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f")
# print VelocityTable.to_latex()
# print "\n\n Pressure convergence"
# PressureTitles = ["l","Total DoF","P DoF","Soln Time","P-L2","L2-order"]
# PressureValues = np.concatenate((level,Wdim,Pressuredim,SolTime,errL2p,l2porder),axis=1)
# PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles)
# pd.set_option('precision',3)
# PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e")
# PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f")
# print PressureTable.to_latex()
name = "OutputNew8/table"+str(aa)
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
IterTable.to_csv(name)
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
file = open("OutputNew8/tol"+str(aa)+".txt", "w")
file.write("Outer:\n"+str(OuterTol)+"\n\n")
file.write("Inner:\n")
if len(test[kk-1]) ==2:
file.write(str(InnerTol))
else:
for s in InnerTol:
file.write(str(s))
file.write(str(kappa)+","+str(MU))
print "-------------------------------------------------------\n\n\n\n\n\n\n"
# \begin{tabular}{lrrrrrll}
# \toprule
# {} & l & DoF & AV solve Time & Total picard time & picard iterations & Av Outer its & Av Inner its \\
# \midrule
# 0 & 3 & 1620 & 7.568889 & 68.87 & 9 & 245.2 & 3.4 \\
# 1 & 4 & 6180 & 1.267143 & 10.28 & 7 & 96.9 & 2.0 \\
# 2 & 5 & 24132 & 5.147143 & 40.59 & 7 & 107.6 & 2.7 \\
# 3 & 6 & 95364 & 19.560000 & 154.99 & 7 & 94.4 & 2.1 \\
# \bottomrule
# \end{tabular}
# Outer Tol: 1e-06 Inner Tol: 0.1
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# # plot(interpolate(ue,Velocity))
# plot(p_k)
# # pe = interpolate(pe,Pressure)
# # pe.vector()[:] -= np.max(pe.vector().array() )/2
# # plot(interpolate(pe,Pressure))
# plot(b_k)
# # plot(interpolate(be,Magnetic))
# plot(r_k)
# # plot(interpolate(re,Lagrange))
# # # interactive()
print kappa, MU
# interactive()
| mit |
sryza/freewaydata | python/clusterstations.py | 1 | 1314 | import pandas as pd
import loadfreewaydata
import sklearn.cluster as cluster
import plotonmap
import numpy as np
#rawdata = loadfreewaydata.load_measurement_data_hour('../d07_text_station_hour_2013_01.txt')
freeway_data = loadfreewaydata.load_freeway_metadata('../d07_stations_2012_09_06.txt')
def cluster_stations(rawdata, colname, filename):
times = rawdata['timestamp'].unique()
speeds_by_station = {g[0]:g[1][colname] for g in rawdata.groupby('station')}
for station_speeds in speeds_by_station.values():
station_speeds.index = times
# rows are stations, columns are times
station_mat = pd.DataFrame(data=speeds_by_station).transpose()
# filter out stations with no data
    station_mat = station_mat[station_mat.count(axis=1) == len(times)]  # keep only stations with a value at every timestamp
# do the clustering
num_clusters = 5
kmeans = cluster.KMeans(n_clusters = num_clusters)
kmeans.fit(station_mat)
colors = np.linspace(0, 1, num_clusters)
station_colors = {station_mat.index[i]:colors[kmeans.labels_[i]] for i in range(len(station_mat.index))}
plotonmap.plot_on_map(station_colors, freeway_data, '../html/showfreeways.html.template', filename)
# cluster_stations(rawdata, 'avgspeed', '../speedclusters.html')
# cluster_stations(rawdata, 'totalflow', '../flowclusters.html')
| apache-2.0 |
levelrf/level_basestation | gr-filter/examples/decimate.py | 13 | 5841 | #!/usr/bin/env python
#
# Copyright 2009,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._decim))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._decim
print "Taps per channel: ", tpc
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = gr.add_cc()
freqs = [10, 20, 2040]
for i in xrange(len(freqs)):
self.signals.append(gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = gr.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = gr.vector_sink_c()
self.connect(self.pfb, self.snk)
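        # Note (added commentary): with fs = 10 kHz and a decimation of 20 the
        # output sample rate is 500 Hz, so the 10 Hz and 20 Hz tones fall inside
        # the ~200 Hz passband of the prototype filter, while the 2040 Hz tone
        # is suppressed (~120 dB attenuation) by the decimating filter.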
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(16,9))
fig2 = pylab.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
ajaybhat/scikit-image | doc/examples/segmentation/plot_otsu.py | 10 | 1209 | """
============
Thresholding
============
Thresholding is used to create a binary image. This example uses Otsu's method
to calculate the threshold value.
Otsu's method calculates an "optimal" threshold (marked by a red line in the
histogram below) by maximizing the variance between two classes of pixels,
which are separated by the threshold. Equivalently, this threshold minimizes
the intra-class variance.
.. [1] http://en.wikipedia.org/wiki/Otsu's_method
"""
import matplotlib
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import threshold_otsu
matplotlib.rcParams['font.size'] = 9
image = camera()
thresh = threshold_otsu(image)
binary = image > thresh
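# Illustrative cross-check (added sketch, not part of the original example): a
# hand-rolled version of Otsu's criterion that scans every candidate threshold
# and keeps the one maximizing the between-class variance of the histogram.
import numpy as np

counts, bin_edges = np.histogram(image.ravel(), bins=256)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.0
weight1 = np.cumsum(counts)                        # pixels at or below each bin
weight2 = np.cumsum(counts[::-1])[::-1]            # pixels at or above each bin
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
between_var = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
manual_thresh = bin_centers[:-1][np.argmax(between_var)]
# manual_thresh should agree closely with `thresh` from threshold_otsu above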
#fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5))
fig = plt.figure(figsize=(8, 2.5))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2)
ax3 = plt.subplot(1, 3, 3, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax1.imshow(image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.hist(image.ravel(), bins=256)
ax2.set_title('Histogram')
ax2.axvline(thresh, color='r')
ax3.imshow(binary, cmap=plt.cm.gray)
ax3.set_title('Thresholded')
ax3.axis('off')
plt.show()
| bsd-3-clause |
fspaolo/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 9 | 4822 | from itertools import product
import numpy as np
from nose.tools import assert_true
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.todense(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.todense(), 1), np.ones((3, 1)))
pred = np.dot(A.todense(), X)
assert_less(np.linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).todense()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(np.linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(20), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 20]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = np.linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = np.linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
import warnings
from nose.tools import assert_raises
M = np.ones((10, 3))
with warnings.catch_warnings(record=True):
assert_raises(ValueError, manifold.locally_linear_embedding,
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
youprofit/scikit-image | skimage/io/tests/test_plugin.py | 24 | 3393 | from contextlib import contextmanager
from numpy.testing import assert_equal, raises
from skimage import io
from skimage.io import manage_plugins
io.use_plugin('pil')
priority_plugin = 'pil'
def setup_module():
manage_plugins.use_plugin('test') # see ../_plugins/test_plugin.py
def teardown_module():
io.reset_plugins()
@contextmanager
def protect_preferred_plugins():
"""Contexts where `preferred_plugins` can be modified w/o side-effects."""
preferred_plugins = manage_plugins.preferred_plugins.copy()
try:
yield
finally:
manage_plugins.preferred_plugins = preferred_plugins
def test_read():
io.imread('test.png', as_grey=True, dtype='i4', plugin='test')
def test_save():
io.imsave('test.png', [1, 2, 3], plugin='test')
def test_show():
io.imshow([1, 2, 3], plugin_arg=(1, 2), plugin='test')
def test_collection():
io.imread_collection('*.png', conserve_memory=False, plugin='test')
def test_use():
manage_plugins.use_plugin('test')
manage_plugins.use_plugin('test', 'imshow')
@raises(ValueError)
def test_failed_use():
manage_plugins.use_plugin('asd')
def test_use_priority():
manage_plugins.use_plugin(priority_plugin)
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, priority_plugin)
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
def test_use_priority_with_func():
manage_plugins.use_plugin('pil')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test', 'imread')
plug, func = manage_plugins.plugin_store['imread'][0]
assert_equal(plug, 'test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'pil')
manage_plugins.use_plugin('test')
plug, func = manage_plugins.plugin_store['imsave'][0]
assert_equal(plug, 'test')
def test_plugin_order():
p = io.plugin_order()
assert 'imread' in p
assert 'test' in p['imread']
def test_available():
assert 'qt' in io.available_plugins
assert 'test' in io.find_available_plugins(loaded=True)
def test_load_preferred_plugins_all():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins = {'all': ['pil'],
'imshow': ['matplotlib']}
manage_plugins.reset_plugins()
for plugin_type in ('imread', 'imsave'):
plug, func = manage_plugins.plugin_store[plugin_type][0]
assert func == getattr(pil_plugin, plugin_type)
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == getattr(matplotlib_plugin, 'imshow')
def test_load_preferred_plugins_imread():
from skimage.io._plugins import pil_plugin, matplotlib_plugin
with protect_preferred_plugins():
manage_plugins.preferred_plugins['imread'] = ['pil']
manage_plugins.reset_plugins()
plug, func = manage_plugins.plugin_store['imread'][0]
assert func == pil_plugin.imread
plug, func = manage_plugins.plugin_store['imshow'][0]
assert func == matplotlib_plugin.imshow, func.__module__
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause |
NetEaseGame/AutomatorX | scripts/image.py | 6 | 3198 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'hzsunshx'
#import numpy as np
import cv2
#from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 8
def _middlePoint(pts):
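    # (Added commentary) Two-pass robust centre: first average every keypoint
    # coordinate, then drop points farther than 1.2x the average distance from
    # that centre and re-average the remaining "good" points.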
def add(p1, p2):
return (p1[0]+p2[0], p1[1]+p2[1])
def distance(p1, p2):
import math
l2 = (p1[0]-p2[0])*(p1[0]-p2[0]) + (p1[1]-p2[1])*(p1[1]-p2[1])
return math.sqrt(l2)
# debug
for p in pts:
print 'Point:', p.pt
length = len(pts)
sumx, sumy = reduce(add, [p.pt for p in pts])
point = sumx/length, sumy/length
print 'step1: result=', point
# filter out ok points
avg_distance = sum([distance(point, p.pt) for p in pts])/length
print 'avg distance=', avg_distance
good = []
sumx, sumy = 0.0, 0.0
for p in pts:
print 'point: %s, distance: %.2f' %(p.pt, distance(p.pt, point))
if distance(p.pt, point) < 1.2*avg_distance:
good.append(p.pt)
sumx += p.pt[0]
sumy += p.pt[1]
else:
print 'not good', p.pt
print 'step1: result=', point
point = map(long, (sumx/len(good), sumy/len(good)))
print 'step2: point=', point
return point
def find_image_position(origin='origin.png', query='query.png', outfile=None):
'''
    find all positions of the query image inside the origin image
    @return a tuple: (origin.shape, query.shape, positions); positions is an
            empty list when not enough good matches are found
    might raise Exception
'''
img1 = cv2.imread(query, 0) # query image(small)
img2 = cv2.imread(origin, 0) # train image(big)
# Initiate SIFT detector
sift = cv2.SIFT()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
print len(kp1), len(kp2)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
# flann
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
print len(kp1), len(kp2), 'good cnt:', len(good)
if len(good)*1.0/len(kp1) < 0.5:
#if len(good)<MIN_MATCH_COUNT:
print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
return img2.shape, img1.shape, []
queryPts = []
trainPts = []
for dm in good:
queryPts.append(kp1[dm.queryIdx])
trainPts.append(kp2[dm.trainIdx])
img3 = cv2.drawKeypoints(img1, queryPts)
cv2.imwrite('image/query.png', img3)
img3 = cv2.drawKeypoints(img2, trainPts)
point = _middlePoint(trainPts)
print 'position in', point
if outfile:
edge = 10
top_left = (point[0]-edge, point[1]-edge)
bottom_right = (point[0]+edge, point[1]+edge)
cv2.rectangle(img3, top_left, bottom_right, 255, 2)
cv2.imwrite(outfile, img3)
return img2.shape, img1.shape, [point]
if __name__ == '__main__':
pts = find_image_position('image/mule.png', 'image/template.png',
outfile='image/train.png')
print pts
| apache-2.0 |
jamesliu/mxnet | example/deep-embedded-clustering/data.py | 16 | 1384 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from __future__ import print_function
import os
import numpy as np
from sklearn.datasets import fetch_mldata
def get_mnist():
""" Gets MNIST dataset """
np.random.seed(1234) # set seed for deterministic ordering
data_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
data_path = os.path.join(data_path, '../../data')
mnist = fetch_mldata('MNIST original', data_home=data_path)
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p].astype(np.float32)*0.02
Y = mnist.target[p]
return X, Y
| apache-2.0 |
rrohan/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
rickyhan83/ml_kaggle | Digit Recognizer/plot_numbers.py | 1 | 1048 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
trains = pd.read_csv("train.csv")
Y = trains['label']
del trains['label']
X_datasarr = trains.as_matrix()
X_norm = X_datasarr > 0
X = X_norm.astype(int)
ones = []
for i in range(len(Y)):
if Y[i] == 4:
ones.append(i)
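# (Added comment) `ones` now holds the row indices of every sample labelled 4;
# the loop below draws the first 25 of them as a 5x5 grid, rebuilding each
# 784-pixel row as a 28x28 image, rotating it with np.rot90 (presumably to
# orient the digit) and scatter-plotting the non-zero pixels.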
fig = plt.figure()
for index in range(25):
#fig, ax = plt.subplots()
ax = fig.add_subplot(5,5,index+1)
data = X[ones[index]]
digit = np.zeros((28,28),dtype=int)
xlist = []
ylist = []
col = 28
row = 28
for i in range(row):
for j in range(col):
if data.item(i * 28 + j)==1:
digit [i][j] = data.item(i * 28 + j)
data = np.rot90(digit,k=3)
for i in range(row):
for j in range(col):
if data.item(i * 28 + j)==1:
xlist.append(i)
ylist.append(j)
ax.set_xlim([0,28])
ax.set_ylim([0,28])
plt.scatter(xlist,ylist, marker = 'x')
| apache-2.0 |
phoebe-project/phoebe2-docs | 2.1/tutorials/RV.py | 1 | 4824 | #!/usr/bin/env python
# coding: utf-8
# 'rv' Datasets and Options
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Dataset Parameters
# --------------------------
#
# Let's add an RV dataset to the Bundle.
# In[3]:
b.add_dataset('rv')
print b.filter(kind='rv')
# In[4]:
print b.filter(kind='rv_dep')
# For information on these passband-dependent parameters, see the section on the [lc dataset](LC) (these are used only to compute fluxes when rv_method=='flux-weighted')
# ### times
# In[5]:
print b['times']
# ### rvs
# In[6]:
print b['rvs']
# ### sigmas
# In[7]:
print b['sigmas']
# Compute Options
# ------------------
#
# Let's look at the compute options (for the default PHOEBE 2 backend) that relate to the RV dataset.
#
# Other compute options are covered elsewhere:
# * parameters related to dynamics are explained in the section on the [orb dataset](ORB)
# * parameters related to meshing, eclipse detection, and subdivision (used if rv_method=='flux-weighted') are explained in the section on the [mesh dataset](MESH)
# * parameters related to computing fluxes (used if rv_method=='flux-weighted') are explained in the section on the [lc dataset](LC)
# In[8]:
print b['compute']
# ### rv_method
# In[9]:
print b['rv_method']
# If rv_method is set to 'dynamical' then the computed radial velocities are simply the z-velocities of the centers of mass of each component. In this case, only the dynamical options are relevant. For more details on these, see the section on the [orb dataset](ORB).
#
# If rv_method is set to 'flux-weighted' then radial velocities are determined by the z-velocity of each visible surface element of the mesh, weighted by their respective intensities. Since the stars are placed in their orbits by the dynamic options, the section on the [orb dataset](ORB) is still applicable. So are the meshing options described in [mesh dataset](MESH) and the options for computing fluxes in [lc dataset](LC).
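# As a quick illustration (added suggestion, not executed in this tutorial), the
# cheaper dynamical treatment could be selected for all RV datasets before
# calling run_compute with something like:
#
# b.set_value_all('rv_method', 'dynamical')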
# ### rv_grav
# In[10]:
print b['rv_grav']
# See the [Gravitational Redshift Example Script](../examples/grav_redshift) for more details on the influence this parameter has on radial velocities.
# Synthetics
# ------------------
# In[11]:
b.set_value_all('times', np.linspace(0,1,101))
# In[12]:
b.run_compute(irrad_method='none')
# In[13]:
b['rv@model'].twigs
# In[14]:
print b['times@primary@rv@model']
# In[15]:
print b['rvs@primary@rv@model']
# Plotting
# ---------------
#
# By default, RV datasets plot as 'rvs' vs 'times'.
# In[16]:
afig, mplfig = b['rv@model'].plot(show=True)
# Since these are the only two columns available in the synthetic model, the only other option is to plot in phase instead of time.
# In[17]:
afig, mplfig = b['rv@model'].plot(x='phases', show=True)
# In system hierarchies where there may be multiple periods, it is also possible to determine whose period to use for phasing.
# In[18]:
b['period'].components
# In[19]:
afig, mplfig = b['rv@model'].plot(x='phases:binary', show=True)
# Mesh Fields
# ---------------------
#
#
# By adding a mesh dataset and setting the columns parameter, per-element radial-velocity quantities can be exposed and plotted. Since the radial velocities are flux-weighted, the flux-related quantities are also included. For a description of these, see the section on the [lc dataset](LC).
#
# Let's add a mesh at the first time of the rv dataset and re-call run_compute
# In[20]:
b.add_dataset('mesh', times=[0], dataset='mesh01')
# In[21]:
print b['columns'].choices
# In[22]:
b['columns'] = ['rvs@rv01']
# In[23]:
b.run_compute(irrad_method='none')
# In[24]:
print b['model'].datasets
# These new columns are stored with the rv's dataset tag, but with the mesh model-kind.
# In[25]:
b.filter(dataset='rv01', kind='mesh', context='model').twigs
# Any of these columns are then available to use as edge or facecolors when plotting the mesh (see the section on the [MESH dataset](MESH)).
# In[26]:
afig, mplfig = b['mesh01@model'].plot(fc='rvs', ec='None', show=True)
# ### rvs
# In[27]:
print b['rvs@primary@rv01@mesh@model']
| gpl-3.0 |
acq4/acq4 | acq4/analysis/modules/IVCurve/IVCurve.py | 3 | 89452 | # -*- coding: utf-8 -*-
from __future__ import print_function
"""
IVCurve: Analysis module that analyzes current-voltage and firing
relationships from current clamp data.
This is part of Acq4
Paul B. Manis, Ph.D.
2011-2013.
Pep8 compliant (via pep8.py) 10/25/2013
Refactoring begun 3/21/2015
"""
from collections import OrderedDict
import os
import os.path
# import traceback
import itertools
import functools
# import gc
import numpy as np
import scipy
from acq4.util import Qt
# from acq4.util import DataManager
from acq4.analysis.AnalysisModule import AnalysisModule
import acq4.pyqtgraph as pg
# from acq4.pyqtgraph import configfile
# from acq4.util.metaarray import MetaArray
import acq4.util.matplotlibexporter as matplotlibexporter
import acq4.analysis.tools.Utility as Utility # pbm's utilities...
import acq4.analysis.tools.Fitting as Fitting # pbm's fitting stuff...
import acq4.analysis.tools.ScriptProcessor as ScriptProcessor
from . import ctrlTemplate
import pprint
import time
# noinspection PyPep8
class IVCurve(AnalysisModule):
"""
IVCurve is an Analysis Module for Acq4.
IVCurve performs analyses of current-voltage relationships in
electrophysiology experiments. The module is interactive, and is primarily
designed to allow a preliminary examination of data collected in current clamp and voltage clamp.
Results analyzed include:
Resting potential (average RMP through the episodes in the protocol).
Input resistance (maximum slope if IV relationship below Vrest)
Cell time constant (single exponential fit)
Ih Sag amplitude and tau
Spike rate as a function of injected current
Interspike interval as a function of time for each current level
RMP as a function of time through the protocol
"""
def __init__(self, host):
AnalysisModule.__init__(self, host)
self.Clamps = self.dataModel.GetClamps() # access the "GetClamps" class for reading data
self.data_template = (
OrderedDict([('Species', (12, '{:>12s}')), ('Age', (5, '{:>5s}')), ('Sex', (3, '{:>3s}')), ('Weight', (6, '{:>6s}')),
('Temperature', (10, '{:>10s}')), ('ElapsedTime', (11, '{:>11.2f}')),
('RMP', (5, '{:>5.1f}')), ('Rin', (5, '{:>5.1f}')), ('Bridge', (5, '{:>5.1f}')),
('tau', (5, '{:>5.1f}')), ('AdaptRatio', (9, '{:>9.3f}')),
('tauh', (5, '{:>5.1f}')), ('Gh', (6, '{:>6.2f}')),
('FiringRate', (12, '{:>9.1f}')),
('AP1_HalfWidth', (13, '{:>13.2f}')), ('AP1_Latency', (11, '{:>11.1f}')),
('AP2_HalfWidth', (13, '{:>13.2f}')), ('AP2_Latency', (11, '{:>11.1f}')),
('AHP_Depth', (9, '{:9.2f}')),
('Description', (11, '{:s}')),
]))
self.Script = ScriptProcessor.ScriptProcessor(host)
self.Script.setAnalysis(analysis=self.updateAnalysis,
fileloader = self.loadFileRequested, template = self.data_template,
clamps = self.Clamps, printer=self.printAnalysis,
dbupdate=self.dbStoreClicked) # specify the routines to be called and data sets to be used
self.loaded = None
self.filename = None
self.dirsSet = None
self.lrss_flag = True # show is default
self.lrpk_flag = True
self.rmp_flag = True
self.bridgeCorrection = None # bridge correction in Mohm.
self.showFISI = True # show FISI or ISI as a function of spike number (when False)
self.lrtau_flag = False
self.regions_exist = False
self.tauh_fits = {}
self.tauh_fitted = {}
self.tau_fits = {}
self.tau_fitted = {}
self.regions_exist = False
self.regions = {}
self.analysis_summary = {}
self.tx = None
self.keep_analysis_count = 0
self.dataMarkers = []
self.doUpdates = True
self.colors = ['w', 'g', 'b', 'r', 'y', 'c']
self.symbols = ['o', 's', 't', 'd', '+']
self.color_list = itertools.cycle(self.colors)
self.symbol_list = itertools.cycle(self.symbols)
self.script_header = False
self.Clamps.data_mode = 'IC' # analysis depends on the type of data we have.
self.clear_results()
# --------------graphical elements-----------------
self._sizeHint = (1280, 900) # try to establish size of window
self.ctrlWidget = Qt.QWidget()
self.ctrl = ctrlTemplate.Ui_Form()
self.ctrl.setupUi(self.ctrlWidget)
self.main_layout = pg.GraphicsView() # instead of GraphicsScene?
# make fixed widget for the module output
self.widget = Qt.QWidget()
self.gridLayout = Qt.QGridLayout()
self.widget.setLayout(self.gridLayout)
self.gridLayout.setContentsMargins(4, 4, 4, 4)
self.gridLayout.setSpacing(1)
# Setup basic GUI
self._elements_ = OrderedDict([
('File Loader',
{'type': 'fileInput', 'size': (170, 50), 'host': self}),
('Parameters',
{'type': 'ctrl', 'object': self.ctrlWidget, 'host': self,
'size': (160, 700)}),
('Plots',
{'type': 'ctrl', 'object': self.widget, 'pos': ('right',),
'size': (400, 700)}),
])
self.initializeElements()
self.file_loader_instance = self.getElement('File Loader', create=True)
# grab input form the "Ctrl" window
self.ctrl.IVCurve_Update.clicked.connect(self.updateAnalysis)
self.ctrl.IVCurve_PrintResults.clicked.connect(
functools.partial(self.printAnalysis, printnow=True,
script_header=True))
if not matplotlibexporter.HAVE_MPL:
self.ctrl.IVCurve_MPLExport.setEnabled = False # make button inactive
# self.ctrl.IVCurve_MPLExport.clicked.connect(self.matplotlibExport)
else:
self.ctrl.IVCurve_MPLExport.clicked.connect(
functools.partial(matplotlibexporter.matplotlibExport, gridlayout=self.gridLayout,
title=self.filename))
self.ctrl.IVCurve_KeepAnalysis.clicked.connect(self.resetKeepAnalysis)
self.ctrl.IVCurve_getFileInfo.clicked.connect(self.get_file_information)
[self.ctrl.IVCurve_RMPMode.currentIndexChanged.connect(x)
for x in [self.update_rmpAnalysis, self.analyzeSpikes]]
self.ctrl.IVCurve_FISI_ISI_button.clicked.connect(self.displayFISI_ISI)
self.ctrl.dbStoreBtn.clicked.connect(self.dbStoreClicked)
self.ctrl.IVCurve_OpenScript_Btn.clicked.connect(self.read_script)
self.ctrl.IVCurve_RunScript_Btn.clicked.connect(self.rerun_script)
self.ctrl.IVCurve_PrintScript_Btn.clicked.connect(self.Script.print_script_output)
#self.scripts_form.PSPReversal_ScriptCopy_Btn.clicked.connect(self.copy_script_output)
#self.scripts_form.PSPReversal_ScriptFormatted_Btn.clicked.connect(self.print_formatted_script_output)
self.ctrl.IVCurve_ScriptName.setText('None')
self.layout = self.getElement('Plots', create=True)
# instantiate the graphs using a gridLayout (also facilitates matplotlib export; see export routine below)
self.data_plot = pg.PlotWidget()
self.gridLayout.addWidget(self.data_plot, 0, 0, 3, 1)
self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')
self.cmd_plot = pg.PlotWidget()
self.gridLayout.addWidget(self.cmd_plot, 3, 0, 1, 1)
self.label_up(self.cmd_plot, 'T (s)', 'I (A)', 'Command')
self.RMP_plot = pg.PlotWidget()
self.gridLayout.addWidget(self.RMP_plot, 1, 1, 1, 1)
self.label_up(self.RMP_plot, 'T (s)', 'V (mV)', 'RMP')
self.fiPlot = pg.PlotWidget()
self.gridLayout.addWidget(self.fiPlot, 2, 1, 1, 1)
self.label_up(self.fiPlot, 'I (pA)', 'Spikes (#)', 'F-I')
self.fslPlot = pg.PlotWidget()
self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1)
self.label_up(self.fslPlot, 'I (pA)', 'Fsl/Fisi (ms)', 'FSL/FISI')
self.IV_plot = pg.PlotWidget()
self.gridLayout.addWidget(self.IV_plot, 0, 1, 1, 1)
self.label_up(self.IV_plot, 'I (pA)', 'V (V)', 'I-V')
for row, s in enumerate([20, 10, 10, 10]):
self.gridLayout.setRowStretch(row, s)
# self.tailPlot = pg.PlotWidget()
# self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1)
# self.label_up(self.tailPlot, 'V (V)', 'I (A)', 'Tail Current')
# Add a color scale
self.color_scale = pg.GradientLegend((20, 150), (-10, -10))
self.data_plot.scene().addItem(self.color_scale)
self.ctrl.pushButton.clicked.connect(functools.partial(self.initialize_regions,
reset=True))
def clear_results(self):
"""
Clear results resets variables.
This is typically needed every time a new data set is loaded.
"""
self.filename = ''
self.r_in = 0.0
self.tau = 0.0
self.adapt_ratio = 0.0
self.spikes_counted = False
self.nospk = []
self.spk = []
self.Sequence = ''
self.ivss = [] # steady-state IV (window 2)
self.ivpk = [] # peak IV (window 1)
self.fsl = [] # first spike latency
self.fisi = [] # first isi
self.rmp = [] # resting membrane potential during sequence
self.analysis_summary = {}
self.script_header = True
def resetKeepAnalysis(self):
self.keep_analysis_count = 0 # reset counter.
def show_or_hide(self, lrregion='', forcestate=None):
"""
Show or hide specific regions in the display
Parameters
----------
lrregion : str, default: ''
name of the region('lrwin0', etc)
forcestate : None or Boolean, default: None
Set True to force the show status, False to Hide.
If forcestate is None, then uses the region's 'shstate' value
to set the state.
Returns
-------
nothing
"""
if lrregion == '':
            print('IVCurve:show_or_hide:: lrregion is {:<s}'.format(lrregion))
return
region = self.regions[lrregion]
if forcestate is not None:
if forcestate:
region['region'].show()
region['state'].setChecked(Qt.Qt.Checked)
region['shstate'] = True
else:
region['region'].hide()
region['state'].setChecked(Qt.Qt.Unchecked)
region['shstate'] = False
else:
if not region['shstate']:
region['region'].show()
region['state'].setChecked(Qt.Qt.Checked)
region['shstate'] = True
else:
region['region'].hide()
region['state'].setChecked(Qt.Qt.Unchecked)
region['shstate'] = False
def displayFISI_ISI(self):
"""
Control display of first interspike interval/first spike latency
versus ISI over time.
"""
        if self.showFISI:  # currently showing FISI/FSL; switch to ISI over time
self.showFISI = False
else:
self.showFISI = True
self.update_SpikePlots()
def initialize_regions(self, reset=False):
"""
initialize_regions sets the linear regions on the displayed data
Here we create the analysis regions in the plot. However, this should
NOT happen until the plot has been created
        Note that the information about each region is held in a dictionary,
        which for each region has a dictionary that accesses the UI and class
        methods for that region. This simplifies the code later and reduces
        repetitive sections.
"""
# hold all the linear regions in a dictionary
if not self.regions_exist:
self.regions['lrleak'] = {'name': 'leak', # use a "leak" window
'region': pg.LinearRegionItem([0, 1], orientation=pg.LinearRegionItem.Horizontal,
brush=pg.mkBrush(255, 255, 0, 50.)),
'plot': self.cmd_plot,
'state': self.ctrl.IVCurve_subLeak,
'shstate': False, # keep internal copy of the state
'mode': self.ctrl.IVCurve_subLeak.isChecked(),
'start': self.ctrl.IVCurve_LeakMin,
'stop': self.ctrl.IVCurve_LeakMax,
'updater': self.updateAnalysis,
'units': 'pA'}
self.ctrl.IVCurve_subLeak.region = self.regions['lrleak']['region'] # save region with checkbox
self.regions['lrwin0'] = {'name': 'win0', # peak window
'region': pg.LinearRegionItem([0, 1],
brush=pg.mkBrush(128, 128, 128, 50.)),
'plot': self.data_plot,
'state': self.ctrl.IVCurve_showHide_lrpk,
'shstate': True, # keep internal copy of the state
'mode': None,
'start': self.ctrl.IVCurve_pkTStart,
'stop': self.ctrl.IVCurve_pkTStop,
'updater': self.updateAnalysis,
'units': 'ms'}
self.ctrl.IVCurve_showHide_lrpk.region = self.regions['lrwin0']['region'] # save region with checkbox
self.regions['lrwin1'] = {'name': 'win2', # ss window
'region': pg.LinearRegionItem([0, 1],
brush=pg.mkBrush(0, 0, 255, 50.)),
'plot': self.data_plot,
'state': self.ctrl.IVCurve_showHide_lrss,
'shstate': True, # keep internal copy of the state
'mode': None,
'start': self.ctrl.IVCurve_ssTStart,
'stop': self.ctrl.IVCurve_ssTStop,
'updater': self.updateAnalysis,
'units': 'ms'}
self.ctrl.IVCurve_showHide_lrss.region = self.regions['lrwin1']['region'] # save region with checkbox
# self.lrtau = pg.LinearRegionItem([0, 1],
# brush=pg.mkBrush(255, 0, 0, 50.))
self.regions['lrrmp'] = {'name': 'rmp',
'region': pg.LinearRegionItem([0, 1],
brush=pg.mkBrush
(255, 255, 0, 25.)),
'plot': self.data_plot,
'state': self.ctrl.IVCurve_showHide_lrrmp,
'shstate': True, # keep internal copy of the state
'mode': None,
'start': self.ctrl.IVCurve_rmpTStart,
'stop': self.ctrl.IVCurve_rmpTStop,
'updater': self.update_rmpAnalysis,
'units': 'ms'}
self.ctrl.IVCurve_showHide_lrrmp.region = self.regions['lrrmp']['region'] # save region with checkbox
# establish that measurement is on top, exclusion is next, and reference is on bottom
self.regions['lrtau'] = {'name': 'tau',
'region': pg.LinearRegionItem([0, 1],
brush=pg.mkBrush
(255, 255, 0, 25.)),
'plot': self.data_plot,
'state': self.ctrl.IVCurve_showHide_lrtau,
'shstate': False, # keep internal copy of the state
'mode': None,
'start': self.ctrl.IVCurve_tau2TStart,
'stop': self.ctrl.IVCurve_tau2TStop,
'updater': self.update_Tauh,
'units': 'ms'}
self.ctrl.IVCurve_showHide_lrtau.region = self.regions['lrtau']['region'] # save region with checkbox
self.regions['lrwin0']['region'].setZValue(500)
self.regions['lrwin1']['region'].setZValue(100)
self.regions['lrtau']['region'].setZValue(1000)
self.regions['lrrmp']['region'].setZValue(1000)
self.regions['lrleak']['region'].setZValue(1000)
for regkey, reg in self.regions.items(): # initialize region states
self.show_or_hide(lrregion=regkey, forcestate=reg['shstate'])
for regkey, reg in self.regions.items():
reg['plot'].addItem(reg['region'])
reg['state'].clicked.connect(functools.partial(self.show_or_hide,
lrregion=regkey))
if reg['updater'] is not None:
reg['region'].sigRegionChangeFinished.connect(
functools.partial(reg['updater'], region=reg['name']))
# if self.regions[reg]['mode'] is not None:
# self.regions[reg]['mode'].currentIndexChanged.connect(self.interactive_analysis)
if reset:
for regkey, reg in self.regions.items(): # initialize region states
self.show_or_hide(lrregion=regkey, forcestate=reg['shstate'])
for reg in self.regions.values():
for s in ['start', 'stop']:
reg[s].setSuffix(' ' + reg['units'])
self.regions_exist = True
def get_file_information(self, default_dh=None):
"""
get_file_information reads the sequence information from the
currently selected data file
Two-dimensional sequences are supported.
        Parameters
        ----------
        default_dh : data handle, default None
            the data handle to use to access the file information
        Returns
        -------
        nothing
"""
if default_dh is None:
dh = self.file_loader_instance.selectedFiles()
else:
dh = default_dh
if not dh or len(dh) == 0: # when using scripts, the fileloader may not know..
return
dh = dh[0] # only the first file
sequence = self.dataModel.listSequenceParams(dh)
keys = list(sequence.keys())
leftseq = [str(x) for x in sequence[keys[0]]]
if len(keys) > 1:
rightseq = [str(x) for x in sequence[keys[1]]]
else:
rightseq = []
leftseq.insert(0, 'All')
rightseq.insert(0, 'All')
### specific to our program - relocate
self.ctrl.IVCurve_Sequence1.clear()
self.ctrl.IVCurve_Sequence2.clear()
self.ctrl.IVCurve_Sequence1.addItems(leftseq)
self.ctrl.IVCurve_Sequence2.addItems(rightseq)
self.sequence = sequence
def updaterStatus(self, mode='on'):
"""
Change the auto updater status
"""
for regkey, reg in self.regions.items():
if mode in ['on', 'On', True]:
self.doUpdates = True
reg['region'].sigRegionChangeFinished.connect(
functools.partial(reg['updater'], region=reg['name']))
if mode in ['off', 'Off', None, False]:
self.doUpdates = False
try:
reg['region'].sigRegionChangeFinished.disconnect()
except: # may already be disconnected...so fail gracefully
pass
def loadFileRequested(self, dh, analyze=True, bridge=None):
"""
loadFileRequested is called by "file loader" when a file is requested.
FileLoader is provided by the AnalysisModule class
dh is the handle to the currently selected directory (or directories)
This function loads all of the successive records from the specified protocol.
Ancillary information from the protocol is stored in class variables.
Extracts information about the commands, sometimes using a rather
simplified set of assumptions. Much of the work for reading the data is
performed in the GetClamps class in PatchEPhys.
:param dh: the directory handle (or list of handles) representing the selected
            entities from the FileLoader in the Analysis Module
:modifies: plots, sequence, data arrays, data mode, etc.
:return: True if successful; otherwise raises an exception
"""
self.data_plot.clearPlots()
self.cmd_plot.clearPlots()
self.clear_results()
self.updaterStatus('Off')
if len(dh) == 0:
raise Exception("IVCurve::loadFileRequested: " +
"Select an IV protocol directory.")
if len(dh) != 1:
raise Exception("IVCurve::loadFileRequested: " +
"Can only load one file at a time.")
self.get_file_information(default_dh=dh) # Get info from most recent file requested
dh = dh[0] # just get the first one
self.filename = dh.name()
self.current_dirhandle = dh # this is critical!
self.loaded = dh
self.analysis_summary = self.dataModel.cell_summary(dh) # get other info as needed for the protocol
# print 'analysis summary: ', self.analysis_summary
pars = {} # need to pass some parameters from the GUI
pars['limits'] = self.ctrl.IVCurve_IVLimits.isChecked() # checkbox: True if loading limited current range
pars['cmin'] = self.ctrl.IVCurve_IVLimitMin.value() # minimum current level to load
pars['cmax'] = self.ctrl.IVCurve_IVLimitMax.value() # maximum current level to load
pars['KeepT'] = self.ctrl.IVCurve_KeepT.isChecked() # keep timebase
# sequence selections:
        # each pars['sequenceN'] entry is a dictionary
# The dictionary has 'index' (currentIndex()) and 'count' from the GUI
pars['sequence1'] = {'index': [self.ctrl.IVCurve_Sequence1.currentIndex() - 1]}
pars['sequence1']['count'] = self.ctrl.IVCurve_Sequence1.count() - 1
pars['sequence2'] = {'index': [self.ctrl.IVCurve_Sequence2.currentIndex() - 1]}
pars['sequence2']['count'] = self.ctrl.IVCurve_Sequence2.count() - 1
ci = self.Clamps.getClampData(dh, pars)
if ci is None:
return False
self.ctrl.IVCurve_dataMode.setText(self.Clamps.data_mode)
# self.bridgeCorrection = 200e6
# print 'bridge: ', bridge
if bridge is not None:
self.bridgeCorrection = bridge
self.ctrl.IVCurve_bridge.setValue(self.bridgeCorrection)
#for i in range(self.Clamps.traces.shape[0]):
print('******** Doing bridge correction: ', self.bridgeCorrection)
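            # Bridge-balance correction (intent, assuming traces are in volts and
            # cmd_wave in amps): V_true = V_measured - R_bridge * I_command.
            # e.g. with R_bridge = 10 MOhm and I = -100 pA, 1 mV is added back to the trace.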
self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave)
else:
br = self.ctrl.IVCurve_bridge.value()*1e6
# print 'br: ', br
if br != 0.0:
self.bridgeCorrection = br
self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave)
else:
self.bridgeCorrection = None
# now plot the data
self.ctrl.IVCurve_tauh_Commands.clear()
self.ctrl.IVCurve_tauh_Commands.addItems(ci['cmdList'])
self.color_scale.setIntColorScale(0, len(ci['dirs']), maxValue=200)
self.make_map_symbols()
self.plot_traces()
self.setup_regions()
self.get_window_analysisPars() # prepare the analysis parameters
self.updaterStatus('on') # re-enable update status
if analyze: # only do this if requested (default). Don't do in script processing ....yet
self.updateAnalysis()
return True
def plot_traces(self, multimode=False):
"""
Plot the current data traces.
:param multimode: try using "multiline plot routine" to speed up plots (no color though)
:return: nothing
"""
if self.ctrl.IVCurve_KeepAnalysis.isChecked():
self.keep_analysis_count += 1
else:
self.keep_analysis_count = 0 # always make sure is reset
# this is the only way to reset iterators.
self.color_list = itertools.cycle(self.colors)
self.symbol_list = itertools.cycle(self.symbols)
self.clearDecorators()
self.make_map_symbols()
self.data_plot.plotItem.clearPlots()
self.cmd_plot.plotItem.clearPlots()
ntr = self.Clamps.traces.shape[0]
self.data_plot.setDownsampling(auto=False, mode='mean')
self.data_plot.setClipToView(False) # setting True deletes some points used for decoration of spikes by shape
self.cmd_plot.setDownsampling(auto=False, mode='mean')
self.cmd_plot.setClipToView(True) # can leave this true since we do not put symbols on the plot
self.data_plot.disableAutoRange()
self.cmd_plot.disableAutoRange()
cmdindxs = np.unique(self.Clamps.commandLevels) # find the unique voltages
colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))] # make a list to use
if multimode:
pass
# datalines = MultiLine(self.Clamps.time_base, self.Clamps.traces, downsample=10)
# self.data_plot.addItem(datalines)
# cmdlines = MultiLine(self.Clamps.time_base, self.Clamps.cmd_wave, downsample=10)
# self.cmd_plot.addItem(cmdlines)
else:
for i in range(ntr):
atrace = self.Clamps.traces[i]
acmdwave = self.Clamps.cmd_wave[i]
self.data_plot.plot(x=self.Clamps.time_base, y=atrace, downSample=10, downSampleMethod='mean',
pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255))
self.cmd_plot.plot(x=self.Clamps.time_base, y=acmdwave, downSample=10, downSampleMethod='mean',
pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255))
if self.Clamps.data_mode in self.dataModel.ic_modes:
self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')
self.label_up(self.cmd_plot, 'T (s)', 'I (%s)' % self.Clamps.command_units, 'Data')
elif self.Clamps.data_mode in self.dataModel.vc_modes: # voltage clamp
self.label_up(self.data_plot, 'T (s)', 'I (A)', 'Data')
self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data')
else: # mode is not known: plot both as V
self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')
self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data')
self.data_plot.autoRange()
self.cmd_plot.autoRange()
def setup_regions(self):
"""
Initialize the positions of the lr regions on the display.
We attempt to use a logical set of values based on the timing of command steps
and stimulus events
:return:
"""
self.initialize_regions() # now create the analysis regions, if not already existing
if self.ctrl.IVCurve_KeepT.isChecked() is False: # change regions; otherwise keep...
tstart_pk = self.Clamps.tstart
tdur_pk = self.Clamps.tdur * 0.4 # use first 40% of trace for peak
tstart_ss = self.Clamps.tstart + 0.75 * self.Clamps.tdur
tdur_ss = self.Clamps.tdur * 0.25
tstart_tau = self.Clamps.tstart + 0.1 * self.Clamps.tdur
tdur_tau = 0.9 * self.Clamps.tdur
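            # default analysis windows, relative to the command step:
            #   peak (lrwin0): first 40% of the step
            #   ss   (lrwin1): last 25% of the step
            #   tauh (lrtau) : from 10% into the step to its end
            #   rmp  (lrrmp) : 0 to 90% of the time before the step (set just below)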
# tauh window
self.regions['lrtau']['region'].setRegion([tstart_tau,
tstart_tau + tdur_tau])
# peak voltage window
self.regions['lrwin0']['region'].setRegion([tstart_pk,
tstart_pk + tdur_pk])
            # steady-state measurement:
self.regions['lrwin1']['region'].setRegion([tstart_ss,
tstart_ss + tdur_ss])
# rmp measurement
self.regions['lrrmp']['region'].setRegion([0., self.Clamps.tstart * 0.9]) # rmp window
# print 'rmp window region: ', self.Clamps.tstart * 0.9
for r in ['lrtau', 'lrwin0', 'lrwin1', 'lrrmp']:
self.regions[r]['region'].setBounds([0., np.max(self.Clamps.time_base)]) # limit regions to data
def get_window_analysisPars(self):
"""
Retrieve the settings of the lr region windows, and some other general values
in preparation for analysis
:return:
"""
self.analysis_parameters = {} # start out empty so we are not fooled by priors
for region in ['lrleak', 'lrwin0', 'lrwin1', 'lrrmp', 'lrtau']:
rgninfo = self.regions[region]['region'].getRegion() # from the display
self.regions[region]['start'].setValue(rgninfo[0] * 1.0e3) # report values to screen
self.regions[region]['stop'].setValue(rgninfo[1] * 1.0e3)
self.analysis_parameters[region] = {'times': rgninfo}
# for region in ['lrwin0', 'lrwin1', 'lrwin2']:
# if self.regions[region]['mode'] is not None:
# self.analysis_parameters[region]['mode'] = self.regions[region]['mode'].currentText()
# self.get_alternation() # get values into the analysisPars dictionary
# self.get_baseline()
# self.get_junction()
def updateAnalysis(self, presets=None, region=None):
"""updateAnalysis re-reads the time parameters and re-analyzes the spikes"""
# print 'self.Script.script: ', self.Script.script['Cells'].keys()
if presets in [True, False]:
presets = None
# print '\n\n*******\n', traceback.format_stack(limit=7)
if presets is not None and type(presets) == type({}): # copy from dictionary of presets into analysis parameters
for k in presets.keys():
self.analysis_summary[k] = presets[k]
if 'SpikeThreshold' in presets.keys():
self.ctrl.IVCurve_SpikeThreshold.setValue(float(presets['SpikeThreshold']))
#print 'set threshold to %f' % float(presets['SpikeThreshold'])
if 'bridgeCorrection' in presets.keys():
self.bridgeCorrection = presets['bridgeCorrection']
print('####### BRIDGE CORRRECTION #######: ', self.bridgeCorrection)
else:
self.bridgeCorrection = 0.
self.get_window_analysisPars()
# print 'updateanalysis: readparsupdate'
self.readParsUpdate(clearFlag=True, pw=False)
def readParsUpdate(self, clearFlag=False, pw=False):
"""
Read the parameter window entries, set the lr regions to the values
in the window, and do an update on the analysis
Parameters
----------
        clearFlag : Boolean, False
            overwritten by map_symbol() before it is used, so the passed value has no effect
        pw : Boolean, False
            passed through to the RMP and peak analyses to control printing
"""
if not self.doUpdates:
return
# analyze spikes first (gets information on which traces to exclude/include for other calculations)
# print 'readparsupdate, calling analyze spikes'
self.analyzeSpikes()
self.analysis_summary['tauh'] = np.nan # define these because they may not get filled...
self.analysis_summary['Gh'] = np.nan
(pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol()
# update RMP first as we might need it for the others.
if self.ctrl.IVCurve_showHide_lrrmp.isChecked():
rgnx1 = self.ctrl.IVCurve_rmpTStart.value() / 1.0e3
rgnx2 = self.ctrl.IVCurve_rmpTStop.value() / 1.0e3
self.regions['lrrmp']['region'].setRegion([rgnx1, rgnx2])
self.update_rmpAnalysis(clear=clearFlag, pw=pw)
if self.ctrl.IVCurve_showHide_lrss.isChecked():
rgnx1 = self.ctrl.IVCurve_ssTStart.value() / 1.0e3
rgnx2 = self.ctrl.IVCurve_ssTStop.value() / 1.0e3
self.regions['lrwin1']['region'].setRegion([rgnx1, rgnx2])
self.update_ssAnalysis()
if self.ctrl.IVCurve_showHide_lrpk.isChecked():
rgnx1 = self.ctrl.IVCurve_pkTStart.value() / 1.0e3
rgnx2 = self.ctrl.IVCurve_pkTStop.value() / 1.0e3
self.regions['lrwin0']['region'].setRegion([rgnx1, rgnx2])
self.update_pkAnalysis(clear=clearFlag, pw=pw)
if self.ctrl.IVCurve_subLeak.isChecked():
rgnx1 = self.ctrl.IVCurve_LeakMin.value() / 1e3
rgnx2 = self.ctrl.IVCurve_LeakMax.value() / 1e3
self.regions['lrleak']['region'].setRegion([rgnx1, rgnx2])
self.update_ssAnalysis()
self.update_pkAnalysis()
if self.ctrl.IVCurve_showHide_lrtau.isChecked():
# include tau in the list... if the tool is selected
rgnx1 = self.ctrl.IVCurve_tau2TStart.value() / 1e3
rgnx2 = self.ctrl.IVCurve_tau2TStop.value() / 1e3
self.regions['lrtau']['region'].setRegion([rgnx1, rgnx2])
self.update_Tauh()
if self.ctrl.IVCurve_PeakMode.currentIndexChanged:
self.peakmode = self.ctrl.IVCurve_PeakMode.currentText()
self.update_pkAnalysis()
self.analyzeSpikeShape() # finally do the spike shape
self.ctrl.IVCurve_bridge.setValue(0.) # reset bridge value after analysis.
def read_script(self, name=''):
"""
read a script file from disk, and use that information to drive the analysis
:param name:
:return:
"""
self.script_name = self.Script.read_script()
if self.script_name is None:
print('Failed to read script')
self.ctrl.IVCurve_ScriptName.setText('None')
return
self.ctrl.IVCurve_ScriptName.setText(os.path.basename(self.script_name))
self.Script.run_script()
def rerun_script(self):
"""
revalidate and run the current script
:return:
"""
self.Script.run_script()
def analyzeSpikes(self):
"""
analyzeSpikes: Using the threshold set in the control panel, count the
number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend)
Updates the spike plot(s).
The following variables are set:
self.spikecount: a 1-D numpy array of spike counts, aligned with the
current (command)
self.adapt_ratio: the adaptation ratio of the spike train
self.fsl: a numpy array of first spike latency for each command level
self.fisi: a numpy array of first interspike intervals for each
command level
self.nospk: the indices of command levels where no spike was detected
        self.spk: the indices of command levels where at least one spike
was detected
"""
if self.keep_analysis_count == 0:
clearFlag = True
else:
clearFlag = False
self.analysis_summary['FI_Curve'] = None
# print '***** analyzing Spikes'
if self.Clamps.data_mode not in self.dataModel.ic_modes or self.Clamps.time_base is None:
print('IVCurve::analyzeSpikes: Cannot count spikes, ' +
'and dataMode is ', self.Clamps.data_mode, 'and ICModes are: ', self.dataModel.ic_modes, 'tx is: ', self.tx)
self.spikecount = []
self.fiPlot.plot(x=[], y=[], clear=clearFlag, pen='w',
symbolSize=6, symbolPen='b',
symbolBrush=(0, 0, 255, 200), symbol='s')
self.fslPlot.plot(x=[], y=[], pen='w', clear=clearFlag,
symbolSize=6, symbolPen='g',
symbolBrush=(0, 255, 0, 200), symbol='t')
self.fslPlot.plot(x=[], y=[], pen='w', symbolSize=6,
symbolPen='y',
symbolBrush=(255, 255, 0, 200), symbol='s')
return
twin = self.Clamps.tend - self.Clamps.tstart # measurements window in seconds
maxspkrate = 50 # max rate to count in adaptation is 50 spikes/second
minspk = 4
        maxspk = int(maxspkrate*twin)  # scale the maximum spike count by the duration of the measurement window
threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3
self.analysis_summary['SpikeThreshold'] = self.ctrl.IVCurve_SpikeThreshold.value()
ntr = len(self.Clamps.traces)
self.spikecount = np.zeros(ntr)
self.fsl = np.zeros(ntr)
self.fisi = np.zeros(ntr)
ar = np.zeros(ntr)
self.allisi = {}
self.spikes = [[] for i in range(ntr)]
self.spikeIndices = [[] for i in range(ntr)]
#print 'clamp start/end: ', self.Clamps.tstart, self.Clamps.tend
for i in range(ntr):
(spikes, spkx) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i],
threshold, t0=self.Clamps.tstart,
t1=self.Clamps.tend,
dt=self.Clamps.sample_interval,
mode='peak', # best to use peak for detection
interpolate=False,
debug=False)
if len(spikes) == 0:
#print 'no spikes found'
continue
self.spikes[i] = spikes
#print 'found %d spikes in trace %d' % (len(spikes), i)
self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes]
self.spikecount[i] = len(spikes)
self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3
if len(spikes) > 1:
self.fisi[i] = (spikes[1] - spikes[0])*1e3
self.allisi[i] = np.diff(spikes)*1e3
# for Adaptation ratio analysis
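                # adaptation ratio = mean of the last two ISIs (from the last three spikes)
                # divided by the first ISI. Worked example: spike times [0.10, 0.12, 0.15, 0.19] s
                # give fisi = 20 ms and misi = mean([30, 40]) = 35 ms, so ar = 1.75, provided the
                # trace qualifies (minspk <= number of spikes <= maxspk).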
if minspk <= len(spikes) <= maxspk:
misi = np.mean(np.diff(spikes[-3:]))*1e3
ar[i] = misi / self.fisi[i]
iAR = np.where(ar > 0)
self.adapt_ratio = np.mean(ar[iAR]) # only where we made the measurement
self.analysis_summary['AdaptRatio'] = self.adapt_ratio
self.ctrl.IVCurve_AR.setText(u'%7.3f' % self.adapt_ratio)
self.nospk = np.where(self.spikecount == 0)
self.spk = np.where(self.spikecount > 0)[0]
self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount])
# print self.analysis_summary['FI_Curve']
self.spikes_counted = True
self.update_SpikePlots()
def _timeindex(self, t):
        return np.argmin(np.fabs(self.Clamps.time_base - t))  # nearest sample index to time t
def analyzeSpikeShape(self, printSpikeInfo=False):
# analyze the spike shape.
# based on Druckman et al. Cerebral Cortex, 2013
begin_dV = 12.0 # V/s or mV/ms
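        # spike onset is taken as the point on the rising phase (before the maximum dV/dt)
        # where dV/dt is closest to begin_dV (12 V/s = 12 mV/ms), loosely following
        # Druckmann et al. (2013).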
ntr = len(self.Clamps.traces)
# print 'analyzespikeshape, self.spk: ', self.spk
self.spikeShape = OrderedDict()
rmp = np.zeros(ntr)
iHold = np.zeros(ntr)
for i in range(ntr):
if len(self.spikes[i]) == 0:
continue
trspikes = OrderedDict()
if printSpikeInfo:
print(np.array(self.Clamps.values))
print(len(self.Clamps.traces))
(rmp[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.traces[i],
0.0, self.Clamps.tstart)
(iHold[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.cmd_wave[i],
0.0, self.Clamps.tstart)
for j in range(len(self.spikes[i])):
thisspike = {'trace': i, 'AP_number': j, 'AP_beginIndex': None, 'AP_endIndex': None,
'peakIndex': None, 'peak_T': None, 'peak_V': None, 'AP_Latency': None,
'AP_beginV': None, 'halfwidth': None, 'trough_T': None,
'trough_V': None, 'peaktotroughT': None,
'current': None, 'iHold': None,
'pulseDuration': None, 'tstart': self.Clamps.tstart} # initialize the structure
thisspike['current'] = self.Clamps.values[i] - iHold[i]
thisspike['iHold'] = iHold[i]
thisspike['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart # in seconds
thisspike['peakIndex'] = self.spikeIndices[i][j]
thisspike['peak_T'] = self.Clamps.time_base[thisspike['peakIndex']]
thisspike['peak_V'] = self.Clamps.traces[i][thisspike['peakIndex']] # max voltage of spike
thisspike['tstart'] = self.Clamps.tstart
# find the minimum going forward - that is AHP min
dt = (self.Clamps.time_base[1]-self.Clamps.time_base[0])
dv = np.diff(self.Clamps.traces[i])/dt
k = self.spikeIndices[i][j] + 1
if j < self.spikecount[i] - 1: # find end of spike (top of next, or end of trace)
kend = self.spikeIndices[i][j+1]
else:
kend = len(self.Clamps.traces[i])
try:
                    km = np.argmin(dv[k:kend])+k  # find the fastest falling point; use it as the start of the search for the AHP minimum
except:
continue
# v = self.Clamps.traces[i][km]
# vlast = self.Clamps.traces[i][km]
#kmin = np.argmin(np.argmin(dv2[k:kend])) + k # np.argmin(np.fabs(self.Clamps.traces[i][k:kend]))+k
kmin = np.argmin(self.Clamps.traces[i][km:kend])+km
thisspike['AP_endIndex'] = kmin
thisspike['trough_T'] = self.Clamps.time_base[thisspike['AP_endIndex']]
thisspike['trough_V'] = self.Clamps.traces[i][kmin]
if thisspike['AP_endIndex'] is not None:
thisspike['peaktotrough'] = thisspike['trough_T'] - thisspike['peak_T']
k = self.spikeIndices[i][j]-1
if j > 0:
kbegin = self.spikeIndices[i][j-1] # trspikes[j-1]['AP_endIndex'] # self.spikeIndices[i][j-1] # index to previ spike start
else:
kbegin = k - int(0.002/dt) # for first spike - 4 msec prior only
if kbegin*dt <= self.Clamps.tstart:
kbegin = kbegin + int(0.0002/dt) # 1 msec
# revise k to start at max of rising phase
try:
km = np.argmax(dv[kbegin:k]) + kbegin
except:
continue
if (km - kbegin < 1):
km = kbegin + int((k - kbegin)/2.) + 1
kthresh = np.argmin(np.fabs(dv[kbegin:km] - begin_dV)) + kbegin # point where slope is closest to begin
thisspike['AP_beginIndex'] = kthresh
thisspike['AP_Latency'] = self.Clamps.time_base[kthresh]
thisspike['AP_beginV'] = self.Clamps.traces[i][thisspike['AP_beginIndex']]
if thisspike['AP_beginIndex'] is not None and thisspike['AP_endIndex'] is not None:
halfv = 0.5*(thisspike['peak_V'] + thisspike['AP_beginV'])
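                    # half-height level midway between the onset voltage and the peak; the
                    # halfwidth is the time between the samples closest to this level on the
                    # rising and falling phases.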
kup = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['AP_beginIndex']:thisspike['peakIndex']] - halfv))
kup += thisspike['AP_beginIndex']
kdown = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['peakIndex']:thisspike['AP_endIndex']] - halfv))
kdown += thisspike['peakIndex']
if kup is not None and kdown is not None:
thisspike['halfwidth'] = self.Clamps.time_base[kdown] - self.Clamps.time_base[kup]
thisspike['hw_up'] = self.Clamps.time_base[kup]
thisspike['hw_down'] = self.Clamps.time_base[kdown]
thisspike['hw_v'] = halfv
trspikes[j] = thisspike
self.spikeShape[i] = trspikes
if printSpikeInfo:
pp = pprint.PrettyPrinter(indent=4)
for m in sorted(self.spikeShape.keys()):
print('----\nTrace: %d has %d APs' % (m, len(list(self.spikeShape[m].keys()))))
for n in sorted(self.spikeShape[m].keys()):
pp.pprint(self.spikeShape[m][n])
self.analysis_summary['spikes'] = self.spikeShape # save in the summary dictionary too
self.analysis_summary['iHold'] = np.mean(iHold)
self.analysis_summary['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart
self.getClassifyingInfo() # build analysis summary here as well.
self.clearDecorators()
self.spikeDecorator()
def spikeDecorator(self):
"""
Put markers on the spikes to visually confirm the analysis of thresholds, etc.
"""
# get colors
cmdindxs = np.unique(self.Clamps.commandLevels) # find the unique voltages
colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))] # make a list to use
alllats = []
allpeakt = []
allpeakv = []
for i, trace in enumerate(self.spikeShape):
aps = []
tps = []
paps = []
ptps = []
taps = []
ttps = []
hwv = []
tups = []
tdps = []
for j, spk in enumerate(self.spikeShape[trace]):
aps.append(self.spikeShape[trace][spk]['AP_beginV'])
alllats.append(self.spikeShape[trace][spk]['AP_Latency'])
tps.append(self.spikeShape[trace][spk]['AP_Latency'])
u =self.data_plot.plot(tps, aps, pen=None, symbol='o', brush=pg.mkBrush('g'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
paps.append(self.spikeShape[trace][spk]['peak_V'])
ptps.append(self.spikeShape[trace][spk]['peak_T'])
allpeakt.append(self.spikeShape[trace][spk]['peak_T']+0.01)
allpeakv.append(self.spikeShape[trace][spk]['peak_V'])
# u = self.data_plot.plot(allpeakt, allpeakv, pen=None, symbol='o', brush=pg.mkBrush('r'), size=2)
# self.dataMarkers.append(u)
u = self.data_plot.plot(ptps, paps, pen=None, symbol='t', brush=pg.mkBrush('w'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
taps.append(self.spikeShape[trace][spk]['trough_V'])
ttps.append(self.spikeShape[trace][spk]['trough_T'])
u = self.data_plot.plot(ttps, taps, pen=None, symbol='+', brush=pg.mkBrush('r'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
tups.append(self.spikeShape[trace][spk]['hw_up'])
tdps.append(self.spikeShape[trace][spk]['hw_down'])
hwv.append(self.spikeShape[trace][spk]['hw_v'])
u =self.data_plot.plot(tups, hwv, pen=None, symbol='d', brush=pg.mkBrush('c'), symbolSize=4)
self.dataMarkers.append(u)
d =self.data_plot.plot(tdps, hwv, pen=None, symbol='s', brush=pg.mkBrush('c'), symbolSize=4)
self.dataMarkers.append(d)
def clearDecorators(self):
if len(self.dataMarkers) > 0:
[self.dataMarkers[k].clear() for k,m in enumerate(self.dataMarkers)]
self.dataMarkers = []
def getIVCurrentThresholds(self):
        # figure out the "threshold" current for spiking, and locate the 150% point above it.
nsp = []
icmd = []
for m in sorted(self.spikeShape.keys()):
n = len(self.spikeShape[m].keys()) # number of spikes in the trace
if n > 0:
nsp.append(len(self.spikeShape[m].keys()))
icmd.append(self.spikeShape[m][0]['current'])
try:
iamin = np.argmin(icmd)
except:
raise ValueError('IVCurve:getIVCurrentThresholds - icmd seems to be ? : ', icmd)
imin = np.min(icmd)
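        # imin is the smallest current step (relative to holding) that evoked spikes ("T");
        # ia150 picks the tested level closest to 1.5*T, and both are mapped back to indices
        # into self.Clamps.values (i.e. trace indices) for use by getClassifyingInfo().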
ia150 = np.argmin(np.abs(1.5*imin-np.array(icmd)))
iacmdthr = np.argmin(np.abs(imin-self.Clamps.values))
ia150cmdthr = np.argmin(np.abs(icmd[ia150] - self.Clamps.values))
#print 'thr indices and values: ', iacmdthr, ia150cmdthr, self.Clamps.values[iacmdthr], self.Clamps.values[ia150cmdthr]
return (iacmdthr, ia150cmdthr) # return threshold indices into self.Clamps.values array at threshold and 150% point
def getClassifyingInfo(self):
"""
Adds the classifying information according to Druckmann et al., Cerebral Cortex, 2013
to the analysis summary
"""
(jthr, j150) = self.getIVCurrentThresholds() # get the indices for the traces we need to pull data from
if jthr == j150:
print('\n%s:' % self.filename)
print('Threshold current T and 1.5T the same: using next up value for j150')
print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape))
print('1 ', self.spikeShape[jthr][0]['current']*1e12)
print('2 ', self.spikeShape[j150+1][0]['current']*1e12)
print(' >> Threshold current: %8.3f 1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12,
self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12))
j150 = jthr + 1
if len(self.spikeShape[j150]) >= 1 and self.spikeShape[j150][0]['halfwidth'] is not None:
self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3
self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3
else:
self.analysis_summary['AP1_Latency'] = np.inf
self.analysis_summary['AP1_HalfWidth'] = np.inf
if len(self.spikeShape[j150]) >= 2 and self.spikeShape[j150][1]['halfwidth'] is not None:
self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3
self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3
else:
self.analysis_summary['AP2_Latency'] = np.inf
self.analysis_summary['AP2_HalfWidth'] = np.inf
rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration'] # spikes per second, normalized for pulse duration
# first AHP depth
# print 'j150: ', j150
# print self.spikeShape[j150][0].keys()
# print self.spikeShape[j150]
AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V']
self.analysis_summary['FiringRate'] = rate
self.analysis_summary['AHP_Depth'] = AHPDepth*1e3 # convert to mV
# pprint.pprint(self.analysis_summary)
# except:
# raise ValueError ('Failed Classification for cell: %s' % self.filename)
def update_Tau_membrane(self, peak_time=None, printWindow=False, whichTau=1, vrange=[-5., -20.]):
"""
Compute time constant (single exponential) from the
onset of the response
using lrpk window, and only steps that produce a voltage change between 5 and 20 mV below rest
or as specified
"""
if len(self.Clamps.commandLevels) == 0: # probably not ready yet to do the update.
return
if self.Clamps.data_mode not in self.dataModel.ic_modes: # only permit in IC
return
rgnpk = list(self.regions['lrwin0']['region'].getRegion())
Func = 'exp1' # single exponential fit with DC offset.
Fits = Fitting.Fitting()
if self.rmp == []:
self.update_rmpAnalysis()
#print self.rmp
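        # single-exponential fit ('exp1') with a DC offset; initpars is assumed here to follow
        # an (offset [V], amplitude [V], tau [s]) convention, i.e. start from the resting
        # potential with a 10 mV amplitude and a 10 ms time constant.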
initpars = [self.rmp*1e-3, 0.010, 0.01]
peak_time = None
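        # note: peak_time is reset to None here, so the argument passed in is ignored and
        # the rgnpk[1] adjustment below never runs.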
icmdneg = np.where(self.Clamps.commandLevels < -20e-12)
maxcmd = np.min(self.Clamps.commandLevels)
ineg = np.where(self.Clamps.commandLevels[icmdneg] < 0.0)
        if peak_time is not None and len(ineg[0]) > 0:
rgnpk[1] = np.max(peak_time[ineg[0]])
dt = self.Clamps.sample_interval
rgnindx = [int((rgnpk[1]-0.005)/dt), int((rgnpk[1])/dt)]
rmps = self.ivbaseline
vmeans = np.mean(self.Clamps.traces[:, rgnindx[0]:rgnindx[1]], axis=1) - self.ivbaseline
indxs = np.where(np.logical_and((vrange[0]*1e-3 >= vmeans[ineg]),
(vmeans[ineg] >= vrange[1]*1e-3)))
indxs = list(indxs[0])
whichdata = ineg[0][indxs] # restricts to valid values
itaucmd = self.Clamps.commandLevels[ineg]
whichaxis = 0
fpar = []
names = []
okdata = []
if len(self.tau_fitted.keys()) > 0:
[self.tau_fitted[k].clear() for k in self.tau_fitted.keys()]
self.tau_fitted = {}
for j, k in enumerate(whichdata):
self.tau_fitted[j] = self.data_plot.plot(self.Clamps.time_base, self.Clamps.traces[k], pen=pg.mkPen('w'))
(fparx, xf, yf, namesx) = Fits.FitRegion([k], whichaxis,
self.Clamps.time_base,
self.Clamps.traces,
dataType='2d',
t0=rgnpk[0], t1=rgnpk[1],
fitFunc=Func,
fitPars=initpars,
method='SLSQP',
bounds=[(-0.1, 0.1), (-0.1, 0.1), (0.005, 0.30)])
if not fparx:
raise Exception('IVCurve::update_Tau_membrane: Charging tau fitting failed - see log')
#print 'j: ', j, len(fpar)
if fparx[0][1] < 2.5e-3: # amplitude must be > 2.5 mV to be useful
continue
fpar.append(fparx[0])
names.append(namesx[0])
okdata.append(k)
self.taupars = fpar
self.tauwin = rgnpk
self.taufunc = Func
self.whichdata = okdata
taus = []
for j in range(len(fpar)):
outstr = ""
taus.append(fpar[j][2])
for i in range(0, len(names[j])):
outstr += '%s = %f, ' % (names[j][i], fpar[j][i])
if printWindow:
print("FIT(%d, %.1f pA): %s " %
(whichdata[j], itaucmd[j] * 1e12, outstr))
meantau = np.mean(taus)
self.ctrl.IVCurve_Tau.setText(u'%18.1f ms' % (meantau * 1.e3))
self.tau = meantau
self.analysis_summary['tau'] = self.tau*1.e3
tautext = 'Mean Tau: %8.1f'
if printWindow:
print(tautext % (meantau * 1e3))
self.show_tau_plot()
def show_tau_plot(self):
Fits = Fitting.Fitting()
fitPars = self.taupars
xFit = np.zeros((len(self.taupars), 500))
for i in range(len(self.taupars)):
xFit[i,:] = np.arange(0, self.tauwin[1]-self.tauwin[0], (self.tauwin[1]-self.tauwin[0])/500.)
yFit = np.zeros((len(fitPars), xFit.shape[1]))
fitfunc = Fits.fitfuncmap[self.taufunc]
if len(self.tau_fits.keys()) > 0:
[self.tau_fits[k].clear() for k in self.tau_fits.keys()]
self.tau_fits = {}
for k, whichdata in enumerate(self.whichdata):
yFit[k] = fitfunc[0](fitPars[k], xFit[k], C=None) # +self.ivbaseline[whichdata]
self.tau_fits[k] = self.data_plot.plot(xFit[k]+self.tauwin[0], yFit[k], pen=pg.mkPen('r', width=2, style=Qt.Qt.DashLine))
def update_Tauh(self, region=None, printWindow=False):
""" compute tau (single exponential) from the onset of the markers
using lrtau window, and only for the step closest to the selected
current level in the GUI window.
Parameters
----------
        region : dummy argument, default : None
            present only to match the call signature used by the region updaters
        printWindow : Boolean, default : False
Also compute the ratio of the sag from the peak (marker1) to the
end of the trace (marker 2).
Based on analysis in Fujino and Oertel, J. Neuroscience 2001,
to type cells based on different Ih kinetics and magnitude.
"""
self.analysis_summary['tauh'] = np.nan
self.analysis_summary['Gh'] = np.nan
if not self.ctrl.IVCurve_showHide_lrtau.isChecked():
return
rgn = self.regions['lrtau']['region'].getRegion()
Func = 'exp1' # single exponential fit to the whole region
Fits = Fitting.Fitting()
initpars = [-80.0 * 1e-3, -10.0 * 1e-3, 50.0 * 1e-3]
# find the current level that is closest to the target current
s_target = self.ctrl.IVCurve_tauh_Commands.currentIndex()
        itarget = self.Clamps.values[s_target]  # retrieve the actual command value
self.neg_cmd = itarget
idiff = np.abs(np.array(self.Clamps.commandLevels) - itarget)
amin = np.argmin(idiff) # amin appears to be the same as s_target
# target trace (as selected in cmd drop-down list):
target = self.Clamps.traces[amin]
        # get Vrmp: approximate the resting potential from the pre-step baseline
vrmp = np.median(target['Time': 0.0:self.Clamps.tstart - 0.005]) * 1000.
self.neg_vrmp = vrmp
# get peak and steady-state voltages
pkRgn = self.regions['lrwin0']['region'].getRegion()
ssRgn = self.regions['lrwin1']['region'].getRegion()
vpk = target['Time': pkRgn[0]:pkRgn[1]].min() * 1000
self.neg_pk = (vpk - vrmp) / 1000.
vss = np.median(target['Time': ssRgn[0]:ssRgn[1]]) * 1000
self.neg_ss = (vss - vrmp) / 1000.
whichdata = [int(amin)]
itaucmd = [self.Clamps.commandLevels[amin]]
self.ctrl.IVCurve_tau2TStart.setValue(rgn[0] * 1.0e3)
self.ctrl.IVCurve_tau2TStop.setValue(rgn[1] * 1.0e3)
fd = self.Clamps.traces['Time': rgn[0]:rgn[1]][whichdata][0]
if len(self.tauh_fitted.keys()) > 0:
[self.tauh_fitted[k].clear() for k in self.tauh_fitted.keys()]
self.tauh_fitted = {}
for k, d in enumerate(whichdata):
self.tauh_fitted[k] = self.data_plot.plot(fd, pen=pg.mkPen('w'))
# now do the fit
whichaxis = 0
(fpar, xf, yf, names) = Fits.FitRegion(whichdata, whichaxis,
self.Clamps.traces.xvals('Time'),
self.Clamps.traces.view(np.ndarray),
dataType='2d',
t0=rgn[0], t1=rgn[1],
fitFunc=Func,
fitPars=initpars)
if not fpar:
raise Exception('IVCurve::update_Tauh: tau_h fitting failed - see log')
bluepen = pg.mkPen('b', width=2.0, style=Qt.Qt.DashLine)
if len(self.tauh_fits.keys()) > 0:
[self.tauh_fits[k].clear() for k in self.tauh_fits.keys()]
self.tauh_fits = {}
self.tauh_fits[0] = self.data_plot.plot(xf[0]+rgn[0], yf[0], pen=bluepen)
# self.tauh_fits.update()
s = np.shape(fpar)
taus = []
for j in range(0, s[0]):
outstr = ""
taus.append(fpar[j][2])
for i in range(0, len(names[j])):
outstr += '%s = %f, ' % (names[j][i], fpar[j][i])
if printWindow:
print("Ih FIT(%d, %.1f pA): %s " %
(whichdata[j], itaucmd[j] * 1e12, outstr))
meantau = np.mean(taus)
self.ctrl.IVCurve_Tauh.setText(u'%8.1f ms' % (meantau * 1.e3))
self.tau2 = meantau
bovera = (vss - vrmp) / (vpk - vrmp)
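        # sag ratio: steady-state deflection relative to the peak deflection; values well
        # below 1 indicate a prominent sag, which feeds the cell-type call below.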
self.ctrl.IVCurve_Ih_ba.setText('%8.1f' % (bovera * 100.))
self.ctrl.IVCurve_ssAmp.setText('%8.2f' % (vss - vrmp))
self.ctrl.IVCurve_pkAmp.setText('%8.2f' % (vpk - vrmp))
if bovera < 0.55 and self.tau2 < 0.015: #
self.ctrl.IVCurve_FOType.setText('D Stellate')
else:
self.ctrl.IVCurve_FOType.setText('T Stellate')
# estimate of Gh:
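        # chord conductances at the peak and steady state of the hyperpolarizing step; their
        # difference approximates Gh, assuming the sag is dominated by Ih.
        # e.g. itarget = -100 pA with peak/ss deflections of -20/-10 mV gives
        # Gpk = 5 nS, Gss = 10 nS, and so Gh ~ 5 nS.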
Gpk = itarget / self.neg_pk
Gss = itarget / self.neg_ss
self.Gh = Gss - Gpk
self.analysis_summary['tauh'] = self.tau2*1.e3
self.analysis_summary['Gh'] = self.Gh
self.ctrl.IVCurve_Gh.setText('%8.2f nS' % (self.Gh * 1e9))
def update_ssAnalysis(self):
"""
Compute the steady-state IV from the selected time window
Parameters
----------
None.
Returns
-------
nothing.
modifies:
ivss, yleak, ivss_cmd, cmd.
The IV curve is only valid when there are no spikes detected in
the window. The values in the curve are taken as the mean of the
current and the voltage in the time window, at each command step.
We also compute the input resistance.
For voltage clamp data, we can optionally remove the "leak" current.
The resulting curve is plotted.
"""
if self.Clamps.traces is None:
return
rgnss = self.regions['lrwin1']['region'].getRegion()
r1 = rgnss[1]
if rgnss[1] == rgnss[0]:
print('Steady-state regions have no width; using 100 msec. window for ss ')
r1 = rgnss[0] + 0.1
self.ctrl.IVCurve_ssTStart.setValue(rgnss[0] * 1.0e3)
self.ctrl.IVCurve_ssTStop.setValue(r1 * 1.0e3)
data1 = self.Clamps.traces['Time': rgnss[0]:r1]
# print 'data shape: ', data1.shape
if data1.shape[1] == 0 or data1.shape[0] == 1:
return # skip it
self.ivss = []
# check out whether there are spikes in the window that is selected
threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3
ntr = len(self.Clamps.traces)
if not self.spikes_counted:
print('updatess: spikes not counted yet? ')
self.analyzeSpikes()
# spikecount = np.zeros(ntr)
# for i in range(ntr):
# (spike, spk) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i],
# threshold,
# t0=rgnss[0], t1=r1,
# dt=self.Clamps.sample_interval,
# mode='schmitt',
# interpolate=False,
# debug=False)
# if len(spike) > 0:
# spikecount[i] = len(spike)
# nospk = np.where(spikecount == 0)
# print 'spikes checked'
self.ivss = data1.mean(axis=1) # all traces
if self.ctrl.IVCurve_SubBaseline.isChecked():
self.ivss = self.ivss - self.ivbaseline
if len(self.nospk) >= 1:
# Steady-state IV where there are no spikes
self.ivss = self.ivss[self.nospk]
self.ivss_cmd = self.Clamps.commandLevels[self.nospk]
# self.commandLevels = commands[self.nospk]
# compute Rin from the SS IV:
# this makes the assumption that:
# successive trials are in order (as are commands)
# commands are not repeated...
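        # Rin is estimated as the maximum local slope dV/dI along the steady-state IV.
        # e.g. V = [-80, -75, -72] mV at I = [-100, -50, 0] pA gives slopes of 100 and 60 MOhm,
        # so r_in = 1.0e8 Ohm (displayed as 100 MOhm after the 1e-6 scaling below).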
if len(self.ivss_cmd) > 0 and len(self.ivss) > 0:
self.r_in = np.max(np.diff
(self.ivss) / np.diff(self.ivss_cmd))
self.ctrl.IVCurve_Rin.setText(u'%9.1f M\u03A9'
% (self.r_in * 1.0e-6))
self.analysis_summary['Rin'] = self.r_in*1.0e-6
else:
self.ctrl.IVCurve_Rin.setText(u'No valid points')
self.yleak = np.zeros(len(self.ivss))
if self.ctrl.IVCurve_subLeak.isChecked():
if self.Clamps.data_mode in self.dataModel.ic_modes:
sf = 1e-12
elif self.Clamps.data_mode in self.dataModel.vc_modes:
sf = 1e-3
else:
sf = 1.0
(x, y) = Utility.clipdata(self.ivss, self.ivss_cmd,
self.ctrl.IVCurve_LeakMin.value() * sf,
self.ctrl.IVCurve_LeakMax.value() * sf)
try:
p = np.polyfit(x, y, 1) # linear fit
self.yleak = np.polyval(p, self.ivss_cmd)
self.ivss = self.ivss - self.yleak
except:
raise ValueError('IVCurve Leak subtraction: no valid points to correct')
isort = np.argsort(self.ivss_cmd)
self.ivss_cmd = self.ivss_cmd[isort]
self.ivss = self.ivss[isort]
self.analysis_summary['IV_Curve_ss'] = [self.ivss_cmd, self.ivss]
self.update_IVPlot()
def update_pkAnalysis(self, clear=False, pw=False):
"""
Compute the peak IV (minimum) from the selected window
mode can be 'min', 'max', or 'abs'
Parameters
----------
clear : Boolean, False
pw : Boolean, False
pw is passed to update_taumembrane to control printing.
"""
if self.Clamps.traces is None:
return
mode = self.ctrl.IVCurve_PeakMode.currentText()
rgnpk = self.regions['lrwin0']['region'].getRegion()
self.ctrl.IVCurve_pkTStart.setValue(rgnpk[0] * 1.0e3)
self.ctrl.IVCurve_pkTStop.setValue(rgnpk[1] * 1.0e3)
data2 = self.Clamps.traces['Time': rgnpk[0]:rgnpk[1]]
if data2.shape[1] == 0:
return # skip it - window missed the data
# check out whether there are spikes in the window that is selected
# but only in current clamp
nospk = []
peak_pos = None
if self.Clamps.data_mode in self.dataModel.ic_modes:
threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3
ntr = len(self.Clamps.traces)
if not self.spikes_counted:
print('update_pkAnalysis: spikes not counted')
self.analyzeSpikes()
spikecount = np.zeros(ntr)
# for i in range(ntr):
# (spike, spk) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i],
# threshold,
# t0=rgnpk[0], t1=rgnpk[1],
# dt=self.Clamps.sample_interval,
# mode='schmitt',
# interpolate=False, debug=False)
# if len(spike) == 0:
# continue
# spikecount[i] = len(spike)
# nospk = np.where(spikecount == 0)
# nospk = np.array(nospk)[0]
if mode == 'Min':
self.ivpk = data2.min(axis=1)
peak_pos = np.argmin(data2, axis=1)
elif mode == 'Max':
self.ivpk = data2.max(axis=1)
peak_pos = np.argmax(data2, axis=1)
elif mode == 'Abs': # find largest regardless of the sign ('minormax')
x1 = data2.min(axis=1)
peak_pos1 = np.argmin(data2, axis=1)
x2 = data2.max(axis=1)
peak_pos2 = np.argmax(data2, axis=1)
self.ivpk = np.zeros(data2.shape[0])
for i in range(data2.shape[0]):
if -x1[i] > x2[i]:
self.ivpk[i] = x1[i]
peak_pos = peak_pos1
else:
self.ivpk[i] = x2[i]
peak_pos = peak_pos2
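            # note: peak_pos ends up as whichever of peak_pos1/peak_pos2 the last trace
            # selected (it is not blended per trace); it is only used below to build
            # peak_time for update_Tau_membrane.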
# self.ivpk = np.array([np.max(x1[i], x2[i]) for i in range(data2.shape[0]])
#self.ivpk = np.maximum(np.fabs(data2.min(axis=1)), data2.max(axis=1))
if self.ctrl.IVCurve_SubBaseline.isChecked():
self.ivpk = self.ivpk - self.ivbaseline
if len(self.nospk) >= 1:
# Peak (min, max or absmax voltage) IV where there are no spikes
self.ivpk = self.ivpk[self.nospk]
self.ivpk_cmd = self.Clamps.commandLevels[self.nospk]
else:
self.ivpk_cmd = self.Clamps.commandLevels
self.ivpk = self.ivpk.view(np.ndarray)
if self.ctrl.IVCurve_subLeak.isChecked():
self.ivpk = self.ivpk - self.yleak
# now sort data in ascending command levels
isort = np.argsort(self.ivpk_cmd)
self.ivpk_cmd = self.ivpk_cmd[isort]
self.ivpk = self.ivpk[isort]
self.analysis_summary['IV_Curve_pk'] = [self.ivpk_cmd, self.ivpk]
self.update_IVPlot()
peak_time = self.Clamps.time_base[peak_pos]
self.update_Tau_membrane(peak_time=peak_time, printWindow=pw)
def update_rmpAnalysis(self, **kwargs):
"""
Compute the RMP over time/commands from the selected window
"""
if self.Clamps.traces is None:
return
rgnrmp = self.regions['lrrmp']['region'].getRegion()
self.ctrl.IVCurve_rmpTStart.setValue(rgnrmp[0] * 1.0e3)
self.ctrl.IVCurve_rmpTStop.setValue(rgnrmp[1] * 1.0e3)
data1 = self.Clamps.traces['Time': rgnrmp[0]:rgnrmp[1]]
data1 = data1.view(np.ndarray)
self.ivbaseline = data1.mean(axis=1) # all traces
self.ivbaseline_cmd = self.Clamps.commandLevels
self.rmp = np.mean(self.ivbaseline) * 1e3 # convert to mV
self.ctrl.IVCurve_vrmp.setText('%8.2f' % self.rmp)
self.update_RMPPlot()
self.analysis_summary['RMP'] = self.rmp
def make_map_symbols(self):
"""
Given the current state of things, (keeping the analysis, when
superimposing multiple results, for example),
sets self.currentSymDict with a dict of pen, fill color, empty color, a symbol from
our lists, and a clearflag. Used to overplot different data.
"""
n = self.keep_analysis_count
pen = next(self.color_list)
filledbrush = pen
emptybrush = None
symbol = next(self.symbol_list)
if n == 0:
clearFlag = True
else:
clearFlag = False
self.currentSymDict = {'pen': pen, 'filledbrush': filledbrush,
'emptybrush': emptybrush, 'symbol': symbol,
'n': n, 'clearFlag': clearFlag}
def map_symbol(self):
cd = self.currentSymDict
if cd['filledbrush'] == 'w':
cd['filledbrush'] = pg.mkBrush((128, 128, 128))
if cd['pen'] == 'w':
cd['pen'] = pg.mkPen((128, 128, 128))
self.lastSymbol = (cd['pen'], cd['filledbrush'],
cd['emptybrush'], cd['symbol'],
cd['n'], cd['clearFlag'])
return self.lastSymbol
def update_IVPlot(self):
"""
        Draw the peak and steady-state IV to the I-V window
Note: x axis is always I or V, y axis V or I
"""
if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False:
self.IV_plot.clear()
(pen, filledbrush, emptybrush, symbol, n, clearFlag) = \
self.map_symbol()
if self.Clamps.data_mode in self.dataModel.ic_modes:
if (len(self.ivss) > 0 and
self.ctrl.IVCurve_showHide_lrss.isChecked()):
self.IV_plot.plot(self.ivss_cmd * 1e12, self.ivss * 1e3,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=filledbrush)
if (len(self.ivpk) > 0 and
self.ctrl.IVCurve_showHide_lrpk.isChecked()):
self.IV_plot.plot(self.ivpk_cmd * 1e12, self.ivpk * 1e3,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=emptybrush)
self.label_up(self.IV_plot, 'I (pA)', 'V (mV)', 'I-V (CC)')
if self.Clamps.data_mode in self.dataModel.vc_modes:
if (len(self.ivss) > 0 and
self.ctrl.IVCurve_showHide_lrss.isChecked()):
self.IV_plot.plot(self.ivss_cmd * 1e3, self.ivss * 1e9,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=filledbrush)
if (len(self.ivpk) > 0 and
self.ctrl.IVCurve_showHide_lrpk.isChecked()):
self.IV_plot.plot(self.ivpk_cmd * 1e3, self.ivpk * 1e9,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=emptybrush)
self.label_up(self.IV_plot, 'V (mV)', 'I (nA)', 'I-V (VC)')
def update_RMPPlot(self):
"""
Draw the RMP to the I-V window
Note: x axis can be I, T, or # spikes
"""
if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False:
self.RMP_plot.clear()
if len(self.ivbaseline) > 0:
(pen, filledbrush, emptybrush, symbol, n, clearFlag) = \
self.map_symbol()
mode = self.ctrl.IVCurve_RMPMode.currentIndex()
if self.Clamps.data_mode in self.dataModel.ic_modes:
sf = 1e3
self.RMP_plot.setLabel('left', 'V mV')
else:
sf = 1e12
self.RMP_plot.setLabel('left', 'I (pA)')
if mode == 0:
self.RMP_plot.plot(self.Clamps.trace_StartTimes, sf * np.array(self.ivbaseline),
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=filledbrush)
self.RMP_plot.setLabel('bottom', 'T (s)')
elif mode == 1:
self.RMP_plot.plot(self.Clamps.commandLevels,
1.e3 * np.array(self.ivbaseline), symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=filledbrush)
self.RMP_plot.setLabel('bottom', 'I (pA)')
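                # note: commandLevels are plotted here in their native units (A) even though
                # the axis label reads pA.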
elif mode == 2:
self.RMP_plot.plot(self.spikecount,
1.e3 * np.array(self.ivbaseline), symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=emptybrush)
self.RMP_plot.setLabel('bottom', 'Spikes')
else:
pass
def update_SpikePlots(self):
"""
Draw the spike counts to the FI and FSL windows
Note: x axis can be I, T, or # spikes
"""
if self.Clamps.data_mode in self.dataModel.vc_modes:
self.fiPlot.clear() # no plots of spikes in VC
self.fslPlot.clear()
return
(pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol()
mode = self.ctrl.IVCurve_RMPMode.currentIndex() # get x axis mode
        self.spcmd = self.Clamps.commandLevels[self.spk]  # get command levels with spikes
iscale = 1.0e12 # convert to pA
yfslsc = 1.0 # convert to msec
if mode == 0: # plot with time as x axis
xfi = self.Clamps.trace_StartTimes
xfsl = self.Clamps.trace_StartTimes
select = range(len(self.Clamps.trace_StartTimes))
xlabel = 'T (s)'
elif mode == 1: # plot with current as x
select = self.spk
xfi = self.Clamps.commandLevels * iscale
xfsl = self.spcmd * iscale
xlabel = 'I (pA)'
elif mode == 2: # plot with spike counts as x
xfi = self.spikecount
xfsl = self.spikecount
select = range(len(self.spikecount))
xlabel = 'Spikes (N)'
else:
return # mode not in available list
self.fiPlot.plot(x=xfi, y=self.spikecount, clear=clearFlag,
symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=filledbrush)
fslmax = 0.
if self.showFISI:
self.fslPlot.plot(x=xfsl, y=self.fsl[select] * yfslsc, clear=clearFlag,
symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=filledbrush)
self.fslPlot.plot(x=xfsl, y=self.fisi[select] * yfslsc, symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=emptybrush)
if len(xfsl) > 0:
self.fslPlot.setXRange(0.0, np.max(xfsl))
self.fslPlot.setYRange(0., max(max(self.fsl[select]), max(self.fisi[select])))
ylabel = 'Fsl/Fisi (ms)'
xfsllabel = xlabel
self.fslPlot.setTitle('FSL/FISI')
else:
maxspk = 0
maxisi = 0.
clear = clearFlag
for i, k in enumerate(self.allisi.keys()):
nspk = len(self.allisi[k])
xisi = np.arange(nspk)
self.fslPlot.plot(x=xisi, y=self.allisi[k] * yfslsc, clear=clear,
symbolSize=6,
symbol=symbol, pen=pen,
symbolPen=pen, symbolBrush=filledbrush)
clear = False
maxspk = max(nspk, maxspk)
maxisi = max(np.max(self.allisi[k]), maxisi)
self.fslPlot.setXRange(0.0, maxspk)
self.fslPlot.setYRange(0.0, maxisi)
xfsllabel = 'Spike Number'
ylabel = 'ISI (s)'
self.fslPlot.setTitle('ISI vs. Spike Number')
self.fiPlot.setLabel('bottom', xlabel)
self.fslPlot.setLabel('bottom', xfsllabel)
self.fslPlot.setLabel('left', ylabel)
def printAnalysis(self, printnow=True, script_header=True, copytoclipboard=False):
"""
Print the analysis summary information (Cell, protocol, etc)
in a nice formatted version to the terminal.
The output can be copied to another program (excel, prism) for further analysis
Parameters
----------
printnow : Boolean, optional
Set true to print to terminal, default: True
script_header : Boolean, optional
Set to print the header line, default: True
copytoclipboard : Boolean, optional
copy the text to the system clipboard, default: False
        Returns
        -------
ltxt : string
The text that would be printed. Might be useful to capture for other purposes
"""
# Dictionary structure: key = information about
if self.Clamps.data_mode in self.dataModel.ic_modes or self.Clamps.data_mode == 'vc':
data_template = self.data_template
else:
data_template = (
OrderedDict([('ElapsedTime', '{:>8.2f}'), ('HoldV', '{:>5.1f}'), ('JP', '{:>5.1f}'),
('Rs', '{:>6.2f}'), ('Cm', '{:>6.1f}'), ('Ru', '{:>6.2f}'),
('Erev', '{:>6.2f}'),
('gsyn_Erev', '{:>9.2f}'), ('gsyn_60', '{:>7.2f}'), ('gsyn_13', '{:>7.2f}'),
# ('p0', '{:6.3e}'), ('p1', '{:6.3e}'), ('p2', '{:6.3e}'), ('p3', '{:6.3e}'),
('I_ionic+', '{:>8.3f}'), ('I_ionic-', '{:>8.3f}'), ('ILeak', '{:>7.3f}'),
('win1Start', '{:>9.3f}'), ('win1End', '{:>7.3f}'),
('win2Start', '{:>9.3f}'), ('win2End', '{:>7.3f}'),
('win0Start', '{:>9.3f}'), ('win0End', '{:>7.3f}'),
]))
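        # the per-key template entries are indexed below as (width, format) pairs
        # (data_template[k][0] for the column width, [1] for the value format), so
        # self.data_template, defined elsewhere, is assumed to follow that convention;
        # the fallback template above supplies format strings only.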
# summary table header is written anew for each cell
htxt = ''
if script_header:
htxt = '{:34s}\t{:15s}\t{:24s}\t'.format("Cell", "Genotype", "Protocol")
for k in data_template.keys():
cnv = '{:<%ds}' % (data_template[k][0])
# print 'cnv: ', cnv
htxt += (cnv + '\t').format(k)
script_header = False
htxt += '\n'
ltxt = ''
if 'Genotype' not in self.analysis_summary.keys():
self.analysis_summary['Genotype'] = 'Unknown'
ltxt += '{:34s}\t{:15s}\t{:24s}\t'.format(self.analysis_summary['CellID'], self.analysis_summary['Genotype'], self.analysis_summary['Protocol'])
for a in data_template.keys():
if a in self.analysis_summary.keys():
txt = self.analysis_summary[a]
if a in ['Description', 'Notes']:
txt = txt.replace('\n', ' ').replace('\r', '') # remove line breaks from output, replace \n with space
#print a, data_template[a]
ltxt += (data_template[a][1]).format(txt) + ' \t'
else:
ltxt += ('{:>%ds}' % (data_template[a][0]) + '\t').format('NaN')
ltxt = ltxt.replace('\n', ' ').replace('\r', '') # remove line breaks
ltxt = htxt + ltxt
if printnow:
print(ltxt)
if copytoclipboard:
clipb = Qt.QApplication.clipboard()
clipb.clear(mode=clipb.Clipboard)
clipb.setText(ltxt, mode=clipb.Clipboard)
return ltxt
def dbStoreClicked(self):
"""
Store data into the current database for further analysis
"""
#self.updateAnalysis()
if self.loaded is None:
return
self.dbIdentity = 'IVCurve' # type of data in the database
db = self._host_.dm.currentDatabase()
# print 'dir (db): ', dir(db)
# print 'dir (db.db): ', dir(db.db)
# print 'db.listTables: ', db.listTables()
# print 'db.tables: ', db.tables
#
table = self.dbIdentity
columns = OrderedDict([
# ('ProtocolDir', 'directory:Protocol'),
('AnalysisDate', 'text'),
('ProtocolSequenceDir', 'directory:ProtocolSequence'),
('Dir', 'text'),
('Protocol', 'text'),
('Genotype', 'text'),
('Celltype', 'text'),
('UseData', 'int'),
('RMP', 'real'),
('R_in', 'real'),
('tau_m', 'real'),
('iHold', 'real'),
('PulseDuration', 'real'),
('neg_cmd', 'real'),
('neg_pk', 'real'),
('neg_ss', 'real'),
('h_tau', 'real'),
('h_g', 'real'),
('SpikeThreshold', 'real'),
('AdaptRatio', 'real'),
('FiringRate', 'real'),
('AP1_HalfWidth', 'real'),
('AP1_Latency', 'real'),
('AP2_HalfWidth', 'real'),
('AP2_Latency', 'real'),
('AHP_Depth', 'real'),
('FI_Curve', 'text'),
('IV_Curve_pk', 'text'),
('IV_Curve_ss', 'text'),
])
if table not in db.tables:
db.createTable(table, columns, owner=self.dbIdentity)
try:
z = self.neg_cmd
except:
self.neg_cmd = 0.
self.neg_pk = 0.
self.neg_ss = 0.
self.tau2 = 0.
self.Gh = 0.
if 'Genotype' not in self.analysis_summary:
self.analysis_summary['Genotype'] = 'Unknown'
# print 'genytope: ', self.analysis_summary['Genotype']
if 'Celltype' not in self.Script.analysis_parameters:
self.analysis_summary['Celltype'] = 'Unknown'
data = {
'AnalysisDate': time.strftime("%Y-%m-%d %H:%M:%S"),
'ProtocolSequenceDir': self.loaded,
# 'ProtocolSequenceDir': self.dataModel.getParent(self.loaded, 'ProtocolSequence'),
'Dir': self.loaded.parent().name(),
'Protocol': self.loaded.name(),
'Genotype': self.analysis_summary['Genotype'],
'Celltype': self.Script.analysis_parameters['Celltype'], # uses global info, not per cell info
'UseData' : 1,
'RMP': self.rmp / 1000.,
'R_in': self.r_in,
'tau_m': self.tau,
'iHold': self.analysis_summary['iHold'],
'PulseDuration': self.analysis_summary['pulseDuration'],
'AdaptRatio': self.adapt_ratio,
'neg_cmd': self.neg_cmd,
'neg_pk': self.neg_pk,
'neg_ss': self.neg_ss,
'h_tau': self.analysis_summary['tauh'],
'h_g': self.analysis_summary['Gh'],
'SpikeThreshold': self.analysis_summary['SpikeThreshold'],
'FiringRate': self.analysis_summary['FiringRate'],
'AP1_HalfWidth': self.analysis_summary['AP1_HalfWidth'],
'AP1_Latency': self.analysis_summary['AP1_Latency'],
'AP2_HalfWidth': self.analysis_summary['AP2_HalfWidth'],
'AP2_Latency': self.analysis_summary['AP2_Latency'],
'AHP_Depth': self.analysis_summary['AHP_Depth'],
'FI_Curve': repr(self.analysis_summary['FI_Curve'].tolist()), # convert array to string for storage
'IV_Curve_pk': repr(np.array(self.analysis_summary['IV_Curve_pk']).tolist()),
'IV_Curve_ss': repr(np.array(self.analysis_summary['IV_Curve_ss']).tolist()),
}
## If only one record was given, make it into a list of one record
if isinstance(data, dict):
data = [data]
## Make sure target table exists and has correct columns, links to input file
fields = db.describeData(data)
## override directory fields since describeData can't guess these for us
# fields['ProtocolDir'] = 'directory:Protocol'
fields['ProtocolSequenceDir'] = 'directory:ProtocolSequence'
with db.transaction():
db.checkTable(table, owner=self.dbIdentity, columns=fields, create=True, addUnknownColumns=True, indexes=[['ProtocolSequenceDir'],])
dirtable = db.dirTableName(self.loaded) # set up the DirTable Protocol Sequence directory.
if not db.hasTable(dirtable):
db.createDirTable(self.loaded)
# delete old
for source in set([d['ProtocolSequenceDir'] for d in data]):
db.delete(table, where={'ProtocolSequenceDir': source})
# write new
with pg.ProgressDialog("Storing IV Results..", 0, 100) as dlg:
for n, nmax in db.iterInsert(table, data, chunkSize=30):
dlg.setMaximum(nmax)
dlg.setValue(n)
if dlg.wasCanceled():
raise HelpfulException("Scan store canceled by user.", msgType='status')
#db.close()
#db.open()
print("Updated record for ", self.loaded.name())
# ---- Helpers ----
# Some of these would normally live in a pyqtgraph-related module, but are
# just stuck here to get the job done.
#
@staticmethod
def label_up(plot, xtext, ytext, title):
"""helper to label up the plot"""
plot.setLabel('bottom', xtext)
plot.setLabel('left', ytext)
plot.setTitle(title)
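    # Illustrative sketch (not part of the original source): a typical call from
    # within this class would look like
    #     self.label_up(some_plot_item, 'I (nA)', 'V (mV)', 'IV Curve')
    # where ``some_plot_item`` and the axis labels are hypothetical placeholders.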
| mit |
marqh/iris | lib/iris/tests/experimental/test_animate.py | 6 | 2918 | # (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test the animation of cubes within iris.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import iris
from iris.coord_systems import GeogCS
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import iris.experimental.animate as animate
import iris.plot as iplt
@tests.skip_plot
class IntegrationTest(tests.GraphicsTest):
def setUp(self):
super(IntegrationTest, self).setUp()
cube = iris.cube.Cube(np.arange(36, dtype=np.int32).reshape((3, 3, 4)))
cs = GeogCS(6371229)
coord = iris.coords.DimCoord(
points=np.array([1, 2, 3], dtype=np.int32), long_name='time')
cube.add_dim_coord(coord, 0)
coord = iris.coords.DimCoord(
points=np.array([-1, 0, 1], dtype=np.int32),
standard_name='latitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 1)
coord = iris.coords.DimCoord(
points=np.array([-1, 0, 1, 2], dtype=np.int32),
standard_name='longitude',
units='degrees',
coord_system=cs)
cube.add_dim_coord(coord, 2)
self.cube = cube
def test_cube_animation(self):
# This follows :meth:`~matplotlib.animation.FuncAnimation.save`
# to ensure that each frame corresponds to known accepted frames for
# the animation.
cube_iter = self.cube.slices(('latitude', 'longitude'))
ani = animate.animate(cube_iter, iplt.contourf)
# Disconnect the first draw callback to stop the animation
ani._fig.canvas.mpl_disconnect(ani._first_draw_id)
ani = [ani]
# Extract frame data
for data in zip(*[a.new_saved_frame_seq() for a in ani]):
# Draw each frame
for anim, d in zip(ani, data):
anim._draw_next_frame(d, blit=False)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
henry-ngo/VIP | vip_hci/negfc/mcmc_sampling.py | 1 | 39691 | #! /usr/bin/env python
"""
Module with the MCMC (``emcee``) sampling for NEGFC parameter estimation.
"""
from __future__ import print_function
__author__ = 'O. Wertz, C. Gomez @ ULg'
__all__ = ['lnprior',
'lnlike',
'mcmc_negfc_sampling',
'chain_zero_truncated',
'show_corner_plot',
'show_walk_plot',
'confidence']
import numpy as np
import os
import emcee
from math import isinf, floor, ceil
import inspect
import datetime
import corner
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.mlab import normpdf
from scipy.stats import norm
from ..fits import open_adicube, open_fits
from ..phot import cube_inject_companions
from ..conf import time_ini, timing, sep
from .simplex_fmerit import get_values_optimize
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def lnprior(param, bounds):
"""
Define the prior log-function.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
Returns
-------
out: float.
0 -- All the model parameters satisfy the prior
conditions defined here.
-np.inf -- At least one model parameters is out of bounds.
"""
try:
r, theta, flux = param
except TypeError:
        print('param must be a tuple, {} was given'.format(type(param)))
try:
r_bounds, theta_bounds, flux_bounds = bounds
except TypeError:
        print('bounds must be a list of tuples, {} was given'.format(type(bounds)))
if r_bounds[0] <= r <= r_bounds[1] and \
theta_bounds[0] <= theta <= theta_bounds[1] and \
flux_bounds[0] <= flux <= flux_bounds[1]:
return 0.0
else:
return -np.inf
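# Illustrative sketch (not part of the original module): how lnprior behaves for
# an in-bounds and an out-of-bounds parameter vector. The numbers are made up
# for demonstration only.
#
#     >>> bounds = [(10, 20), (0, 360), (0, 5000)]
#     >>> lnprior((15., 180., 2500.), bounds)
#     0.0
#     >>> lnprior((25., 180., 2500.), bounds)
#     -inf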
def lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width,
ncomp, aperture_radius, initial_state, cube_ref=None, svd_mode='lapack',
scaling='temp-mean', fmerit='sum', collapse='median', debug=False):
""" Define the likelihood log-function.
Parameters
----------
param: tuple
The model parameters, typically (r, theta, flux).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
annulus_width: float
The width of the annulus of interest in terms of the FWHM.
ncomp: int
The number of principal components.
fwhm : float
        The FWHM in pixels.
aperture_radius: float
The radius of the circular aperture in terms of the FWHM.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref: array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
debug: boolean
If True, the cube is returned along with the likelihood log-function.
Returns
-------
out: float
The log of the likelihood.
"""
# Create the cube with the negative fake companion injected
cube_negfc = cube_inject_companions(cube, psf_norm, angs, flevel=-param[2],
plsc=plsc, rad_dists=[param[0]], n_branches=1,
theta=param[1], verbose=False, imlib='opencv')
# Perform PCA and extract the zone of interest
values = get_values_optimize(cube_negfc,angs,ncomp,annulus_width*fwhm,
aperture_radius*fwhm, initial_state[0],
initial_state[1], cube_ref=cube_ref,
svd_mode=svd_mode, scaling=scaling,
collapse=collapse)
# Function of merit
if fmerit=='sum':
lnlikelihood = -0.5 * np.sum(np.abs(values))
elif fmerit=='stddev':
values = values[values!=0]
lnlikelihood = -1*np.std(np.abs(values))
else:
raise RuntimeError('fmerit choice not recognized')
if debug:
return lnlikelihood, cube_negfc
else:
return lnlikelihood
def lnprob(param,bounds, cube, angs, plsc, psf_norm, fwhm,
annulus_width, ncomp, aperture_radius, initial_state, cube_ref=None,
svd_mode='lapack', scaling='temp-mean', fmerit='sum',
collapse='median',display=False):
""" Define the probability log-function as the sum between the prior and
    likelihood log-functions.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
fwhm : float
        The FWHM in pixels.
annulus_width: float
The width in pixel of the annulus on wich the PCA is performed.
ncomp: int
The number of principal components.
aperture_radius: float
The radius of the circular aperture.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
display: boolean
If True, the cube is displayed with ds9.
Returns
-------
out: float
The probability log-function.
"""
if initial_state is None:
initial_state = param
lp = lnprior(param, bounds)
if isinf(lp):
return -np.inf
return lp + lnlike(param, cube, angs, plsc, psf_norm, fwhm,
annulus_width, ncomp, aperture_radius, initial_state,
cube_ref, svd_mode, scaling, fmerit, collapse, display)
def gelman_rubin(x):
"""
Determine the Gelman-Rubin \hat{R} statistical test between Markov chains.
Parameters
----------
x: numpy.array
The numpy.array on which the Gelman-Rubin test is applied. This array
should contain at least 2 set of data, i.e. x.shape >= (2,).
Returns
-------
out: float
The Gelman-Rubin \hat{R}.
Example
-------
>>> x1 = np.random.normal(0.0,1.0,(1,100))
>>> x2 = np.random.normal(0.1,1.3,(1,100))
>>> x = np.vstack((x1,x2))
>>> gelman_rubin(x)
1.0366629898991262
>>> gelman_rubin(np.vstack((x1,x1)))
0.99
"""
if np.shape(x) < (2,):
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
print("Bad shape for the chains")
return
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum([(x[i] - xbar) ** 2 for i, xbar in enumerate(np.mean(x,
1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return R
def gelman_rubin_from_chain(chain, burnin):
"""
Pack the MCMC chain and determine the Gelman-Rubin \hat{R} statistical test.
In other words, two sub-sets are extracted from the chain (burnin parts are
taken into account) and the Gelman-Rubin statistical test is performed.
Parameters
----------
chain: numpy.array
The MCMC chain with the shape walkers x steps x model_parameters
burnin: float \in [0,1]
The fraction of a walker which is discarded.
Returns
-------
out: float
The Gelman-Rubin \hat{R}.
"""
dim = chain.shape[2]
k = chain.shape[1]
threshold0 = int(floor(burnin*k))
threshold1 = int(floor((1-burnin)*k*0.25))
rhat = np.zeros(dim)
for j in range(dim):
part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
series = np.vstack((part1,part2))
rhat[j] = gelman_rubin(series)
return rhat
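# Illustrative sketch (not part of the original module), assuming a chain of
# shape (nwalkers, steps, 3) such as the one returned by mcmc_negfc_sampling:
#
#     chain = np.random.normal(0., 1., (100, 500, 3))
#     rhat = gelman_rubin_from_chain(chain, burnin=0.3)
#     # rhat holds one value per model parameter, each close to 1 for
#     # well-mixed chains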
def mcmc_negfc_sampling(cubes, angs, psfn, ncomp, plsc, initial_state,
fwhm=4, annulus_width=3, aperture_radius=4, cube_ref=None,
svd_mode='lapack', scaling='temp-mean', fmerit='sum',
collapse='median', nwalkers=1000, bounds=None, a=2.0,
burnin=0.3, rhat_threshold=1.01, rhat_count_threshold=1,
niteration_min=0, niteration_limit=1e02,
niteration_supp=0, check_maxgap=1e04, nproc=1,
output_file=None, display=False, verbose=True, save=False):
""" Runs an affine invariant mcmc sampling algorithm in order to determine
the position and the flux of the planet using the 'Negative Fake Companion'
technique. The result of this procedure is a chain with the samples from the
posterior distributions of each of the 3 parameters.
This technique can be summarized as follows:
1) We inject a negative fake companion (one candidate) at a given
position and characterized by a given flux, both close to the expected
values.
    2) We run PCA on a full annulus which passes through the initial guess,
regardless of the position of the candidate.
3) We extract the intensity values of all the pixels contained in a
circular aperture centered on the initial guess.
4) We calculate the function of merit. The associated chi^2 is given by
chi^2 = sum(|I_j|) where j \in {1,...,N} with N the total number of
pixels contained in the circular aperture.
The steps 1) to 4) are looped. At each iteration, the candidate model
parameters are defined by the emcee Affine Invariant algorithm.
Parameters
----------
cubes: str or numpy.array
The relative path to the cube of fits images OR the cube itself.
angs: str or numpy.array
The relative path to the parallactic angle fits image or the angs itself.
psfn: str or numpy.array
The relative path to the instrumental PSF fits image or the PSF itself.
The PSF must be centered and the flux in a 1*FWHM aperture must equal 1.
ncomp: int
The number of principal components.
plsc: float
The platescale, in arcsec per pixel.
annulus_width: float, optional
The width in pixel of the annulus on which the PCA is performed.
aperture_radius: float, optional
The radius of the circular aperture.
nwalkers: int optional
The number of Goodman & Weare 'walkers'.
initial_state: numpy.array
The first guess for the position and flux of the planet, respectively.
Each walker will start in a small ball around this preferred position.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
'randsvd' is not recommended for the negative fake companion technique.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
bounds: numpy.array or list, default=None, optional
The prior knowledge on the model parameters. If None, large bounds will
be automatically estimated from the initial state.
a: float, default=2.0
The proposal scale parameter. See notes.
burnin: float, default=0.3
The fraction of a walker which is discarded.
    rhat_threshold: float, default=1.01
The Gelman-Rubin threshold used for the test for nonconvergence.
rhat_count_threshold: int, optional
The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in
a row before claiming that the chain has converged.
niteration_min: int, optional
Steps per walker lower bound. The simulation will run at least this
number of steps per walker.
niteration_limit: int, optional
Steps per walker upper bound. If the simulation runs up to
'niteration_limit' steps without having reached the convergence
criterion, the run is stopped.
niteration_supp: int, optional
Number of iterations to run after having "reached the convergence".
check_maxgap: int, optional
Maximum number of steps per walker between two Gelman-Rubin test.
nproc: int, optional
The number of processes to use for parallelization.
output_file: str
        The name of the output file which contains the MCMC results
(if save is True).
display: boolean
If True, the walk plot is displayed at each evaluation of the Gelman-
Rubin test.
verbose: boolean
        Display information in the shell.
save: boolean
If True, the MCMC results are pickled.
Returns
-------
out : numpy.array
The MCMC chain.
Notes
-----
The parameter 'a' must be > 1. For more theoretical information concerning
this parameter, see Goodman & Weare, 2010, Comm. App. Math. Comp. Sci.,
5, 65, Eq. [9] p70.
The parameter 'rhat_threshold' can be a numpy.array with individual
threshold value for each model parameter.
"""
if verbose:
start_time = time_ini()
print(" MCMC sampler for the NEGFC technique ")
print(sep)
# If required, one create the output folder.
if save:
if not os.path.exists('results'):
os.makedirs('results')
if output_file is None:
datetime_today = datetime.datetime.today()
output_file = str(datetime_today.year)+str(datetime_today.month)+\
str(datetime_today.day)+'_'+str(datetime_today.hour)+\
str(datetime_today.minute)+str(datetime_today.second)
if not os.path.exists('results/'+output_file):
os.makedirs('results/'+output_file)
# #########################################################################
# If required, one opens the source files
# #########################################################################
if isinstance(cubes,str) and isinstance(angs,str):
if angs is None:
cubes, angs = open_adicube(cubes, verbose=False)
else:
cubes = open_fits(cubes)
angs = open_fits(angs, verbose=False)
if isinstance(psfn,str):
psfn = open_fits(psfn)
if verbose:
        print("The data has been loaded. Let's continue!")
# #########################################################################
# Initialization of the variables
# #########################################################################
dim = 3 # There are 3 model parameters, resp. the radial and angular
# position of the planet and its flux.
itermin = niteration_min
limit = niteration_limit
supp = niteration_supp
maxgap = check_maxgap
initial_state = np.array(initial_state)
if itermin > limit:
itermin = 0
print("'niteration_min' must be < 'niteration_limit'.")
fraction = 0.3
geom = 0
lastcheck = 0
konvergence = np.inf
rhat_count = 0
chain = np.empty([nwalkers,1,dim])
isamples = np.empty(0)
pos = initial_state + np.random.normal(0,1e-01,(nwalkers,3))
nIterations = limit + supp
rhat = np.zeros(dim)
stop = np.inf
if bounds is None:
bounds = [(initial_state[0]-annulus_width/2.,initial_state[0]+annulus_width/2.), #radius
(initial_state[1]-10,initial_state[1]+10), #angle
(0,2*initial_state[2])] #flux
sampler = emcee.EnsembleSampler(nwalkers,dim,lnprob,a,
args =([bounds, cubes, angs, plsc, psfn,
fwhm, annulus_width, ncomp,
aperture_radius, initial_state,
cube_ref, svd_mode, scaling, fmerit,
collapse]),
threads=nproc)
start = datetime.datetime.now()
# #########################################################################
# Affine Invariant MCMC run
# #########################################################################
if verbose:
print('')
print('Start of the MCMC run ...')
print('Step | Duration/step (sec) | Remaining Estimated Time (sec)')
for k, res in enumerate(sampler.sample(pos,iterations=nIterations,
storechain=True)):
elapsed = (datetime.datetime.now()-start).total_seconds()
if verbose:
if k == 0:
q = 0.5
else:
q = 1
print('{}\t\t{:.5f}\t\t\t{:.5f}'.format(k,elapsed*q,elapsed*(limit-k-1)*q))
start = datetime.datetime.now()
# ---------------------------------------------------------------------
# Store the state manually in order to handle with dynamical sized chain.
# ---------------------------------------------------------------------
## Check if the size of the chain is long enough.
s = chain.shape[1]
if k+1 > s: #if not, one doubles the chain length
empty = np.zeros([nwalkers,2*s,dim])
chain = np.concatenate((chain,empty),axis=1)
## Store the state of the chain
chain[:,k] = res[0]
# ---------------------------------------------------------------------
# If k meets the criterion, one tests the non-convergence.
# ---------------------------------------------------------------------
criterion = np.amin([ceil(itermin*(1+fraction)**geom),\
lastcheck+floor(maxgap)])
if k == criterion:
if verbose:
print('')
print(' Gelman-Rubin statistic test in progress ...')
geom += 1
lastcheck = k
if display:
show_walk_plot(chain)
if save:
import pickle
with open('results/'+output_file+'/'+output_file+'_temp_k{}'.format(k),'wb') as fileSave:
myPickler = pickle.Pickler(fileSave)
myPickler.dump({'chain':sampler.chain,
'lnprob':sampler.lnprobability,
'AR':sampler.acceptance_fraction})
## We only test the rhat if we have reached the minimum number of steps.
if (k+1) >= itermin and konvergence == np.inf:
threshold0 = int(floor(burnin*k))
threshold1 = int(floor((1-burnin)*k*0.25))
# We calculate the rhat for each model parameter.
for j in range(dim):
part1 = chain[:,threshold0:threshold0+threshold1,j].reshape((-1))
part2 = chain[:,threshold0+3*threshold1:threshold0+4*threshold1,j].reshape((-1))
series = np.vstack((part1,part2))
rhat[j] = gelman_rubin(series)
if verbose:
print(' r_hat = {}'.format(rhat))
print(' r_hat <= threshold = {}'.format(rhat <= rhat_threshold))
print('')
# We test the rhat.
if (rhat <= rhat_threshold).all(): #and rhat_count < rhat_count_threshold:
rhat_count += 1
if rhat_count < rhat_count_threshold:
print("Gelman-Rubin test OK {}/{}".format(rhat_count,rhat_count_threshold))
elif rhat_count >= rhat_count_threshold:
print('... ==> convergence reached')
konvergence = k
stop = konvergence + supp
#elif (rhat <= rhat_threshold).all() and rhat_count >= rhat_count_threshold:
# print '... ==> convergence reached'
# konvergence = k
# stop = konvergence + supp
else:
rhat_count = 0
if (k+1) >= stop: #Then we have reached the maximum number of steps for our Markov chain.
print('We break the loop because we have reached convergence')
break
if k == nIterations-1:
print("We have reached the limit number of steps without having converged")
# #########################################################################
# Construction of the independent samples
# #########################################################################
temp = np.where(chain[0,:,0] == 0.0)[0]
if len(temp) != 0:
idxzero = temp[0]
else:
idxzero = chain.shape[1]
    idx = int(np.amin([np.floor(2e05/nwalkers), np.floor(0.1*idxzero)]))
if idx == 0:
isamples = chain[:,0:idxzero,:]
else:
isamples = chain[:,idxzero-idx:idxzero,:]
if save:
import pickle
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
input_parameters = {j : values[j] for j in args[1:]}
output = {'isamples':isamples,
'chain': chain_zero_truncated(chain),
'input_parameters': input_parameters,
'AR': sampler.acceptance_fraction,
'lnprobability': sampler.lnprobability}
with open('results/'+output_file+'/MCMC_results','wb') as fileSave:
myPickler = pickle.Pickler(fileSave)
myPickler.dump(output)
print('')
print("The file MCMC_results has been stored in the folder {}".format('results/'+output_file+'/'))
if verbose:
timing(start_time)
return chain_zero_truncated(chain)
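# Illustrative usage sketch (not part of the original module). The file names
# and numerical values below are hypothetical placeholders:
#
#     chain = mcmc_negfc_sampling('cube.fits', 'angles.fits', 'psf.fits',
#                                 ncomp=10, plsc=0.0272,
#                                 initial_state=(30., 120., 400.),
#                                 nwalkers=100, niteration_limit=200)
#     isamples = chain_zero_truncated(chain)[:, 60:, :].reshape((-1, 3))
#     val_max, conf_interval = confidence(isamples, cfd=68.27)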
def chain_zero_truncated(chain):
"""
Return the Markov chain with the dimension: walkers x steps* x parameters,
where steps* is the last step before having 0 (not yet constructed chain).
Parameters
----------
chain: numpy.array
The MCMC chain.
Returns
-------
out: numpy.array
The truncated MCMC chain, that is to say, the chain which only contains
relevant information.
"""
try:
idxzero = np.where(chain[0,:,0] == 0.0)[0][0]
except:
idxzero = chain.shape[1]
return chain[:,0:idxzero,:]
def show_walk_plot(chain, save=False, **kwargs):
"""
Display or save a figure showing the path of each walker during the MCMC run
Parameters
----------
chain: numpy.array
The Markov chain. The shape of chain must be nwalkers x length x dim.
If a part of the chain is filled with zero values, the method will
discard these steps.
save: boolean, default: False
If True, a pdf file is created.
kwargs:
        Additional attributes are passed to the matplotlib plot method.
Returns
-------
Display the figure or create a pdf file named walk_plot.pdf in the working
directory.
"""
temp = np.where(chain[0,:,0] == 0.0)[0]
if len(temp) != 0:
chain = chain[:,:temp[0],:]
labels = kwargs.pop('labels',["$r$",r"$\theta$","$f$"])
fig, axes = plt.subplots(3, 1, sharex=True, figsize=kwargs.pop('figsize',(8,6)))
axes[2].set_xlabel(kwargs.pop('xlabel','step number'))
axes[2].set_xlim(kwargs.pop('xlim',[0,chain.shape[1]]))
color = kwargs.pop('color','k')
alpha = kwargs.pop('alpha',0.4)
for j in range(3):
axes[j].plot(chain[:,:,j].T, color=color,
alpha=alpha,
**kwargs)
axes[j].yaxis.set_major_locator(MaxNLocator(5))
axes[j].set_ylabel(labels[j])
fig.tight_layout(h_pad=0.0)
if save:
plt.savefig('walk_plot.pdf')
plt.close(fig)
else:
plt.show()
def show_corner_plot(chain, burnin=0.5, save=False, **kwargs):
"""
Display or save a figure showing the corner plot (pdfs + correlation plots)
Parameters
----------
chain: numpy.array
The Markov chain. The shape of chain must be nwalkers x length x dim.
If a part of the chain is filled with zero values, the method will
discard these steps.
    burnin: float, default: 0.5
The fraction of a walker we want to discard.
save: boolean, default: False
If True, a pdf file is created.
kwargs:
        Additional attributes are passed to the corner.corner() method.
Returns
-------
    Display the figure or create a pdf file named corner_plot.pdf in the working
directory.
Raises
------
ImportError
"""
#burnin = kwargs.pop('burnin',0)
try:
temp = np.where(chain[0,:,0] == 0.0)[0]
if len(temp) != 0:
chain = chain[:,:temp[0],:]
length = chain.shape[1]
chain = chain[:,int(np.floor(burnin*(length-1))):length,:].reshape((-1,3))
except IndexError:
pass
if chain.shape[0] == 0:
print("It seems that the chain is empty. Have you already run the MCMC ?")
else:
fig = corner.corner(chain, labels=kwargs.pop('labels',["$r$",r"$\theta$","$f$"]), **kwargs)
if save:
plt.savefig('corner_plot.pdf')
plt.close(fig)
else:
plt.show()
def writeText(document,text):
"""
Write a line of text in a txt file.
Parameters
----------
document: str
The path to the file to append or create.
text: str
The text to write.
Returns
-------
None
"""
with open(document,'a') as fileObject:
if isinstance(text,str):
fileObject.write("%s \n" % text)
elif isinstance(text,tuple):
defFormat = "%s"
for k in range(1,len(text)):
defFormat += "\t %s"
fileObject.write(defFormat % text)
def confidence(isamples, cfd=68.27, bins=100, gaussianFit=False, weights=None,
verbose=True, save=False, **kwargs):
"""
Determine the highly probable value for each model parameter, as well as
the 1-sigma confidence interval.
Parameters
----------
isamples: numpy.array
The independent samples for each model parameter.
cfd: float, optional
The confidence level given in percentage.
bins: int, optional
The number of bins used to sample the posterior distributions.
gaussianFit: boolean, optional
If True, a gaussian fit is performed in order to determine (\mu,\sigma)
weights : (n, ) array_like or None, optional
An array of weights for each sample.
verbose: boolean, optional
Display information in the shell.
save: boolean, optional
If "True", a txt file with the results is saved in the output repository.
kwargs: optional
Additional attributes are passed to the matplotlib hist() method.
Returns
-------
out: tuple
A 2 elements tuple with the highly probable solution and the confidence
interval.
"""
plsc = kwargs.pop('plsc',0.001)
title = kwargs.pop('title',None)
output_file = kwargs.pop('filename','confidence.txt')
try:
l = isamples.shape[1]
except Exception:
l = 1
confidenceInterval = dict()
val_max = dict()
pKey = ['r','theta','f']
if cfd == 100:
cfd = 99.9
#########################################
## Determine the confidence interval ##
#########################################
if gaussianFit:
mu = np.zeros(3)
sigma = np.zeros_like(mu)
if gaussianFit:
fig,ax = plt.subplots(2,3, figsize=(12,8))
else:
fig,ax = plt.subplots(1,3, figsize=(12,4))
for j in range(l):
label_file = ['r','theta','flux']
label = [r'$\Delta r$',r'$\Delta \theta$',r'$\Delta f$']
if gaussianFit:
n, bin_vertices, _ = ax[0][j].hist(isamples[:,j],bins=bins,
weights=weights, histtype='step',
edgecolor='gray')
else:
n, bin_vertices, _ = ax[j].hist(isamples[:,j],bins=bins,
weights=weights, histtype='step',
edgecolor='gray')
bins_width = np.mean(np.diff(bin_vertices))
surface_total = np.sum(np.ones_like(n)*bins_width * n)
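        # The loop below accumulates histogram bins from the highest count
        # downwards until the enclosed area reaches cfd percent of the total,
        # i.e. it approximates the highest-density credible interval.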
n_arg_sort = np.argsort(n)[::-1]
test = 0
pourcentage = 0
for k,jj in enumerate(n_arg_sort):
test = test + bins_width*n[jj]
pourcentage = test/surface_total*100.
if pourcentage > cfd:
if verbose:
print('percentage for {}: {}%'.format(label_file[j],pourcentage))
break
n_arg_min = n_arg_sort[:k].min()
n_arg_max = n_arg_sort[:k+1].max()
if n_arg_min == 0: n_arg_min += 1
if n_arg_max == bins: n_arg_max -= 1
val_max[pKey[j]] = bin_vertices[n_arg_sort[0]]+bins_width/2.
confidenceInterval[pKey[j]] = np.array([bin_vertices[n_arg_min-1],
bin_vertices[n_arg_max+1]]-val_max[pKey[j]])
arg = (isamples[:,j]>=bin_vertices[n_arg_min-1])*(isamples[:,j]<=bin_vertices[n_arg_max+1])
if gaussianFit:
_ = ax[0][j].hist(isamples[arg,j],bins=bin_vertices,
facecolor='gray', edgecolor='darkgray',
histtype='stepfilled', alpha=0.5)
ax[0][j].vlines(val_max[pKey[j]], 0, n[n_arg_sort[0]],
linestyles='dashed', color='red')
ax[0][j].set_xlabel(label[j])
if j==0: ax[0][j].set_ylabel('Counts')
else:
_ = ax[j].hist(isamples[arg,j],bins=bin_vertices, facecolor='gray',
edgecolor='darkgray', histtype='stepfilled',
alpha=0.5)
ax[j].vlines(val_max[pKey[j]], 0, n[n_arg_sort[0]],
linestyles='dashed', color='red')
ax[j].set_xlabel(label[j])
if j==0: ax[j].set_ylabel('Counts')
if gaussianFit:
(mu[j], sigma[j]) = norm.fit(isamples[:,j])
n_fit, bins_fit = np.histogram(isamples[:,j], bins, normed=1,
weights=weights)
_= ax[1][j].hist(isamples[:,j], bins, normed=1, weights=weights,
facecolor='gray', edgecolor='darkgray',
histtype='step')
y = normpdf( bins_fit, mu[j], sigma[j])
ax[1][j].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)
ax[1][j].set_xlabel(label[j])
if j==0: ax[1][j].set_ylabel('Counts')
if title is not None:
msg = r"$\mu$ = {:.4f}, $\sigma$ = {:.4f}"
ax[1][j].set_title(title+' '+msg.format(mu[j],sigma[j]),
fontsize=10)
else:
if title is not None:
ax[1].set_title(title, fontsize=10)
if save:
if gaussianFit:
plt.savefig('confi_hist_flux_r_theta_gaussfit.pdf')
else:
plt.savefig('confi_hist_flux_r_theta.pdf')
plt.tight_layout(w_pad=0.001)
if verbose:
print('')
print('Confidence intervals:')
print('r: {} [{},{}]'.format(val_max['r'],
confidenceInterval['r'][0],
confidenceInterval['r'][1]))
print('theta: {} [{},{}]'.format(val_max['theta'],
confidenceInterval['theta'][0],
confidenceInterval['theta'][1]))
print('flux: {} [{},{}]'.format(val_max['f'],
confidenceInterval['f'][0],
confidenceInterval['f'][1]))
if gaussianFit:
print('')
print('Gaussian fit results:')
print('r: {} +-{}'.format(mu[0],sigma[0]))
print('theta: {} +-{}'.format(mu[1],sigma[1]))
print('f: {} +-{}'.format(mu[2],sigma[2]))
##############################################
## Write inference results in a text file ##
##############################################
if save:
try:
fileObject = open(output_file,'r')
        except IOError:  # if the file does not exist, create it (empty)
            fileObject = open(output_file, 'w')
finally:
fileObject.close()
writeText(output_file,'###########################')
writeText(output_file,'#### INFERENCE TEST ###')
writeText(output_file,'###########################')
writeText(output_file,' ')
writeText(output_file,'Results of the MCMC fit')
writeText(output_file,'----------------------- ')
writeText(output_file,' ')
writeText(output_file,'>> Position and flux of the planet (highly probable):')
writeText(output_file,'{} % confidence interval'.format(cfd))
writeText(output_file,' ')
for i in range(3):
confidenceMax = confidenceInterval[pKey[i]][1]
confidenceMin = -confidenceInterval[pKey[i]][0]
if i == 2:
text = '{}: \t\t\t{:.3f} \t-{:.3f} \t+{:.3f}'
else:
text = '{}: \t\t\t{:.3f} \t\t-{:.3f} \t\t+{:.3f}'
writeText(output_file,text.format(pKey[i],val_max[pKey[i]],
confidenceMin,confidenceMax))
writeText(output_file,' ')
writeText(output_file,'Platescale = {} mas'.format(plsc*1000))
text = '{}: \t\t{:.2f} \t\t-{:.2f} \t\t+{:.2f}'
writeText(output_file,text.format('r (mas)', val_max[pKey[0]]*plsc*1000,
-confidenceInterval[pKey[0]][0]*plsc*1000,
confidenceInterval[pKey[0]][1]*plsc*1000))
if gaussianFit:
return (mu,sigma)
else:
return (val_max,confidenceInterval)
| mit |
yade/trunk | examples/adaptiveintegrator/simple-scene-plot-RungeKuttaCashKarp54.py | 2 | 2589 | #!/usr/bin/python
# Burak ER
# [email protected]
# github.com/burak-er
# Mechanical Engineering Department
# Bursa Technical University
#
# -*- coding: utf-8 -*-
from __future__ import print_function
import matplotlib
matplotlib.use('TkAgg')
# Use an integrator engine that is derived from the interface Integrator.
#RungeKuttaCashKarp54Integrator performs one step of simulation for the given tolerances. When a time step is given, it completes it and then stops.
integrator=RungeKuttaCashKarp54Integrator([
ForceResetter(),
GeneralIntegratorInsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
GravityEngine(gravity=Vector3(0,0,-9.81)),
PyRunner(virtPeriod=1e-99,command='myAddPlotData()')#use virtPeriod on this integrator.
]);
#Tolerances can be set for the optimum accuracy
integrator.rel_err=1e-6;
integrator.abs_err=1e-6;
O.engines=[integrator,
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=1e-2 # this signifies the endpoint. It matters little for the accuracy of the integration, which is governed by rel_err and abs_err of the integrator.
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print("Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live.")
plot.liveInterval=.2
plot.plot(subPlots=False)
print("Number of threads ", os.environ['OMP_NUM_THREADS'])
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
elizabethswann/RR_fitter | codes/emcee/examples/line.py | 2 | 4516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import emcee
import corner
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
# Reproducible results!
np.random.seed(123)
# Choose the "true" parameters.
m_true = -0.9594
b_true = 4.294
f_true = 0.534
# Generate some synthetic data from the model.
N = 50
x = np.sort(10*np.random.rand(N))
yerr = 0.1+0.5*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
# Plot the dataset and the true model.
xl = np.array([0, 10])
pl.errorbar(x, y, yerr=yerr, fmt=".k")
pl.plot(xl, m_true*xl+b_true, "k", lw=3, alpha=0.6)
pl.ylim(-9, 9)
pl.xlabel("$x$")
pl.ylabel("$y$")
pl.tight_layout()
pl.savefig("line-data.png")
# Do the least-squares fit and compute the uncertainties.
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(yerr * yerr)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
print("""Least-squares results:
m = {0} ± {1} (truth: {2})
b = {3} ± {4} (truth: {5})
""".format(m_ls, np.sqrt(cov[1, 1]), m_true, b_ls, np.sqrt(cov[0, 0]), b_true))
# Plot the least-squares result.
pl.plot(xl, m_ls*xl+b_ls, "--k")
pl.savefig("line-least-squares.png")
# Define the probability function as likelihood * prior.
def lnprior(theta):
m, b, lnf = theta
if -5.0 < m < 0.5 and 0.0 < b < 10.0 and -10.0 < lnf < 1.0:
return 0.0
return -np.inf
def lnlike(theta, x, y, yerr):
m, b, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
# Find the maximum likelihood value.
chi2 = lambda *args: -2 * lnlike(*args)
result = op.minimize(chi2, [m_true, b_true, np.log(f_true)], args=(x, y, yerr))
m_ml, b_ml, lnf_ml = result["x"]
print("""Maximum likelihood result:
m = {0} (truth: {1})
b = {2} (truth: {3})
f = {4} (truth: {5})
""".format(m_ml, m_true, b_ml, b_true, np.exp(lnf_ml), f_true))
# Plot the maximum likelihood result.
pl.plot(xl, m_ml*xl+b_ml, "k", lw=2)
pl.savefig("line-max-likelihood.png")
# Set up the sampler.
ndim, nwalkers = 3, 100
pos = [result["x"] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
# Clear and run the production chain.
print("Running MCMC...")
sampler.run_mcmc(pos, 500, rstate0=np.random.get_state())
print("Done.")
pl.clf()
fig, axes = pl.subplots(3, 1, sharex=True, figsize=(8, 9))
axes[0].plot(sampler.chain[:, :, 0].T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].axhline(m_true, color="#888888", lw=2)
axes[0].set_ylabel("$m$")
axes[1].plot(sampler.chain[:, :, 1].T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].axhline(b_true, color="#888888", lw=2)
axes[1].set_ylabel("$b$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].axhline(f_true, color="#888888", lw=2)
axes[2].set_ylabel("$f$")
axes[2].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig("line-time.png")
# Make the triangle plot.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
truths=[m_true, b_true, np.log(f_true)])
fig.savefig("line-triangle.png")
# Plot some samples onto the data.
pl.figure()
for m, b, lnf in samples[np.random.randint(len(samples), size=100)]:
pl.plot(xl, m*xl+b, color="k", alpha=0.1)
pl.plot(xl, m_true*xl+b_true, color="r", lw=2, alpha=0.8)
pl.errorbar(x, y, yerr=yerr, fmt=".k")
pl.ylim(-9, 9)
pl.xlabel("$x$")
pl.ylabel("$y$")
pl.tight_layout()
pl.savefig("line-mcmc.png")
# Compute the quantiles.
samples[:, 2] = np.exp(samples[:, 2])
m_mcmc, b_mcmc, f_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print("""MCMC result:
m = {0[0]} +{0[1]} -{0[2]} (truth: {1})
b = {2[0]} +{2[1]} -{2[2]} (truth: {3})
f = {4[0]} +{4[1]} -{4[2]} (truth: {5})
""".format(m_mcmc, m_true, b_mcmc, b_true, f_mcmc, f_true))
| mit |
zodiacnan/Masterarbeit | moduls/results/psinew.py | 1 | 1051 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 07 09:53:26 2017
@author: DINGNAN
"""
#Psid_q
import matplotlib.pyplot as plt
import pandas as pd
file_1 = "C:\\Users\\DINGNAN\\Desktop\\NanDing\\machines\\res\\Flux_d.dat"
file_2 = "C:\\Users\\DINGNAN\\Desktop\\NanDing\\machines\\res\\Flux_q.dat"
with open(file_1, "r") as fig:
x = []
count = 0
y = []
for line in fig:
count+=1
if count%2 == 0:
data = line.split()
x.append((float(data[0])*float(3000)))
y.append((float(data[1])))
else:
pass
with open(file_2, "r") as fig:
n = []
count = 0
for line in fig:
count+=1
if count%2 == 0:
data = line.split()
n.append((float(data[1])))
else:
pass
plt.figure(figsize = (16,9),dpi=98)
plt.plot(x,y,'red',label = 'Flux_d')
plt.plot(x,n,'blue',label = 'Flux_q')
plt.xlabel('Rotor Position -- [deg]')
plt.ylabel('Psi_d/q')
plt.grid(True)
plt.legend()
plt.savefig('Fluxdq.png')
plt.show()
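# Illustrative refactoring sketch (not part of the original script): the two
# reading loops above are identical except for the column that is kept, so a
# small helper could replace them. The scale factor mirrors the *3000 above.
#
#     def read_every_other_line(path, col, scale=1.0):
#         vals = []
#         with open(path) as fig:
#             for count, line in enumerate(fig, start=1):
#                 if count % 2 == 0:
#                     vals.append(float(line.split()[col]) * scale)
#         return vals
#
#     x = read_every_other_line(file_1, 0, scale=3000.)
#     y = read_every_other_line(file_1, 1)
#     n = read_every_other_line(file_2, 1)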
| gpl-3.0 |
MalkIPP/openfisca-france-data | openfisca_france_data/scripts/read_dbf.py | 5 | 3205 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file is inspired by code found here
# https://github.com/GeoDaSandbox/sandbox/blob/master/pyGDsandbox/dataIO.py
# The aformementionned code is distributed under the following license
# Copyright (c) 2007-2011, GeoDa Center for Geospatial Analysis and Computation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the GeoDa Center for Geospatial Analysis and Computation
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import pysal as ps
import pandas as pd
def read_dbf(dbf_path, index = None, cols = False, incl_index = False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps.open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index)
else:
db.close()
return pd.DataFrame(data)
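# Illustrative usage sketch (not part of the original module). The file name and
# column names below are hypothetical placeholders:
#
#     df = read_dbf('menage.dbf', index='ident', cols=['wprm', 'zone_apl'])
#     df_full = read_dbf('menage.dbf')  # whole table, default integer index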
| agpl-3.0 |
bikong2/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/ensemble/weight_boosting.py | 71 | 40664 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
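    # Illustrative sketch (not part of the original source): staged_score can be
    # used to monitor generalization after each boosting iteration, assuming
    # ``clf`` is a fitted AdaBoost estimator and (X_test, y_test) a held-out set:
    #
    #     for i, score in enumerate(clf.staged_score(X_test, y_test)):
    #         print(i, score)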
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
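    # SAMME.R per-class decision values: (K - 1) * (log p_k(x) - mean_j log p_j(x)),
    # with K = n_classes, which is what the expression below computes.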
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
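    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_classification`` is a
    toy-data helper assumed to be available in ``sklearn.datasets``):
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import AdaBoostClassifier
    >>> X, y = make_classification(n_samples=200, random_state=0)
    >>> clf = AdaBoostClassifier(n_estimators=50, random_state=0).fit(X, y)
    >>> y_pred = clf.predict(X[:5])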
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
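        # (For example, with K = 3 classes a sample whose true class is the
        #  second one gets the coding row [-1/2, 1, -1/2].)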
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
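        # (Illustrative arithmetic: with n_classes = 10 and an error of 0.4,
        #  this is learning_rate * (log(1.5) + log(9)), roughly 2.6 when
        #  learning_rate is 1.)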
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
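    Examples
    --------
    A minimal usage sketch (illustrative only; ``make_regression`` is a
    toy-data helper assumed to be available in ``sklearn.datasets``):
    >>> from sklearn.datasets import make_regression
    >>> from sklearn.ensemble import AdaBoostRegressor
    >>> X, y = make_regression(n_samples=200, random_state=0)
    >>> reg = AdaBoostRegressor(n_estimators=50, random_state=0).fit(X, y)
    >>> y_pred = reg.predict(X[:5])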
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # searchsorted may return a scalar; make sure we work with an ndarray
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
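        # (All three losses map the normalised absolute error in [0, 1] to a
        #  loss in [0, 1]: 'linear' keeps it unchanged, 'square' squares it,
        #  and 'exponential' uses 1 - exp(-error).)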
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
            The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
| bsd-3-clause |
alexandrebarachant/mne-python | mne/viz/tests/test_circle.py | 16 | 5177 | # Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
#
# License: Simplified BSD
import numpy as np
from numpy.testing import assert_raises
from mne.viz import plot_connectivity_circle, circular_layout
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
def test_plot_connectivity_circle():
"""Test plotting connectivity circle
"""
import matplotlib.pyplot as plt
node_order = ['frontalpole-lh', 'parsorbitalis-lh',
'lateralorbitofrontal-lh', 'rostralmiddlefrontal-lh',
'medialorbitofrontal-lh', 'parstriangularis-lh',
'rostralanteriorcingulate-lh', 'temporalpole-lh',
'parsopercularis-lh', 'caudalanteriorcingulate-lh',
'entorhinal-lh', 'superiorfrontal-lh', 'insula-lh',
'caudalmiddlefrontal-lh', 'superiortemporal-lh',
'parahippocampal-lh', 'middletemporal-lh',
'inferiortemporal-lh', 'precentral-lh',
'transversetemporal-lh', 'posteriorcingulate-lh',
'fusiform-lh', 'postcentral-lh', 'bankssts-lh',
'supramarginal-lh', 'isthmuscingulate-lh', 'paracentral-lh',
'lingual-lh', 'precuneus-lh', 'inferiorparietal-lh',
'superiorparietal-lh', 'pericalcarine-lh',
'lateraloccipital-lh', 'cuneus-lh', 'cuneus-rh',
'lateraloccipital-rh', 'pericalcarine-rh',
'superiorparietal-rh', 'inferiorparietal-rh', 'precuneus-rh',
'lingual-rh', 'paracentral-rh', 'isthmuscingulate-rh',
'supramarginal-rh', 'bankssts-rh', 'postcentral-rh',
'fusiform-rh', 'posteriorcingulate-rh',
'transversetemporal-rh', 'precentral-rh',
'inferiortemporal-rh', 'middletemporal-rh',
'parahippocampal-rh', 'superiortemporal-rh',
'caudalmiddlefrontal-rh', 'insula-rh', 'superiorfrontal-rh',
'entorhinal-rh', 'caudalanteriorcingulate-rh',
'parsopercularis-rh', 'temporalpole-rh',
'rostralanteriorcingulate-rh', 'parstriangularis-rh',
'medialorbitofrontal-rh', 'rostralmiddlefrontal-rh',
'lateralorbitofrontal-rh', 'parsorbitalis-rh',
'frontalpole-rh']
label_names = ['bankssts-lh', 'bankssts-rh', 'caudalanteriorcingulate-lh',
'caudalanteriorcingulate-rh', 'caudalmiddlefrontal-lh',
'caudalmiddlefrontal-rh', 'cuneus-lh', 'cuneus-rh',
'entorhinal-lh', 'entorhinal-rh', 'frontalpole-lh',
'frontalpole-rh', 'fusiform-lh', 'fusiform-rh',
'inferiorparietal-lh', 'inferiorparietal-rh',
'inferiortemporal-lh', 'inferiortemporal-rh', 'insula-lh',
'insula-rh', 'isthmuscingulate-lh', 'isthmuscingulate-rh',
'lateraloccipital-lh', 'lateraloccipital-rh',
'lateralorbitofrontal-lh', 'lateralorbitofrontal-rh',
'lingual-lh', 'lingual-rh', 'medialorbitofrontal-lh',
'medialorbitofrontal-rh', 'middletemporal-lh',
'middletemporal-rh', 'paracentral-lh', 'paracentral-rh',
'parahippocampal-lh', 'parahippocampal-rh',
'parsopercularis-lh', 'parsopercularis-rh',
'parsorbitalis-lh', 'parsorbitalis-rh',
'parstriangularis-lh', 'parstriangularis-rh',
'pericalcarine-lh', 'pericalcarine-rh', 'postcentral-lh',
'postcentral-rh', 'posteriorcingulate-lh',
'posteriorcingulate-rh', 'precentral-lh', 'precentral-rh',
'precuneus-lh', 'precuneus-rh',
'rostralanteriorcingulate-lh',
'rostralanteriorcingulate-rh', 'rostralmiddlefrontal-lh',
'rostralmiddlefrontal-rh', 'superiorfrontal-lh',
'superiorfrontal-rh', 'superiorparietal-lh',
'superiorparietal-rh', 'superiortemporal-lh',
'superiortemporal-rh', 'supramarginal-lh',
'supramarginal-rh', 'temporalpole-lh', 'temporalpole-rh',
'transversetemporal-lh', 'transversetemporal-rh']
group_boundaries = [0, len(label_names) / 2]
node_angles = circular_layout(label_names, node_order, start_pos=90,
group_boundaries=group_boundaries)
con = np.random.RandomState(0).randn(68, 68)
plot_connectivity_circle(con, label_names, n_lines=300,
node_angles=node_angles, title='test',
)
assert_raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[-1])
assert_raises(ValueError, circular_layout, label_names, node_order,
group_boundaries=[20, 0])
plt.close('all')
| bsd-3-clause |
daodaoliang/neural-network-animation | video.py | 3 | 1722 | import parameters
from matplotlib import pyplot, animation, rcParams
def generate_writer():
FFMpegWriter = animation.writers['ffmpeg']
writer = FFMpegWriter(fps=parameters.frames_per_second, metadata=parameters.metadata)
fig = pyplot.figure()
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)
pyplot.xlim(0, parameters.width)
pyplot.ylim(0, parameters.height)
axis = pyplot.gca()
axis.set_axis_bgcolor('black')
axis.axes.get_xaxis().set_visible(False)
axis.axes.get_yaxis().set_visible(False)
rcParams['font.size'] = 12
rcParams['text.color'] = 'white'
return fig, writer
def annotate_frame(i, e, average_error, example):
pyplot.text(1, parameters.height - 1, "Iteration #" + str(i))
pyplot.text(1, parameters.height - 2, "Training example #" + str(e + 1))
pyplot.text(1, parameters.output_y_position, "Desired output:")
pyplot.text(1, parameters.output_y_position - 1, str(example.output))
pyplot.text(1, parameters.bottom_margin + 1, "Inputs:")
pyplot.text(1, parameters.bottom_margin, str(example.inputs))
if average_error:
error_bar(average_error)
def error_bar(average_error):
pyplot.text(parameters.error_bar_x_position, parameters.height - 1, "Average Error " + str(average_error) + "%")
border = pyplot.Rectangle((parameters.error_bar_x_position, parameters.height - 3), 10, 1, color='white', fill=False)
pyplot.gca().add_patch(border)
rectangle = pyplot.Rectangle((parameters.error_bar_x_position, parameters.height - 3), 10 * average_error / 100, 1, color='red')
pyplot.gca().add_patch(rectangle)
def take_still(image_file_name):
pyplot.savefig(image_file_name)
| mit |
pompiduskus/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
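    Examples
    --------
    A hand-checkable sketch (illustrative only): a point lying midway between
    its two neighbors receives approximately equal weights.
    >>> import numpy as np
    >>> X = np.array([[0.5, 0.5]])
    >>> Z = np.array([[[0., 0.], [1., 1.]]])
    >>> w = barycenter_weights(X, Z)  # w is approximately [[0.5, 0.5]]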
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
        n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
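    Examples
    --------
    A minimal call sketch (illustrative only; ``make_swiss_roll`` is a
    toy-data generator assumed to be available in ``sklearn.datasets``):
    >>> from sklearn.datasets import make_swiss_roll
    >>> from sklearn.manifold import locally_linear_embedding
    >>> X, _ = make_swiss_roll(n_samples=100, random_state=0)
    >>> Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2)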
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            # add the identity, so M = W'W - W' - W + I = (I - W)'(I - W)
            M.flat[::M.shape[0] + 1] += 1
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
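    Examples
    --------
    A minimal usage sketch (illustrative only; ``load_digits`` is assumed to
    be available in ``sklearn.datasets``):
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import LocallyLinearEmbedding
    >>> X = load_digits(n_class=3).data
    >>> embedding = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
    >>> X_2d = embedding.fit_transform(X)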
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
| bsd-3-clause |
hsaputra/DDF | python/tests/test_ml.py | 3 | 1794 | from __future__ import unicode_literals
import unittest
import pandas as pd
from py4j.java_gateway import Py4JJavaError
import test_base
from ddf import ml
class TestMl(test_base.BaseTest):
"""
Test ML functions
"""
def testKmeans(self):
model = ml.kmeans(self.mtcars, 2, 5, 10)
self.assertIsInstance(model, ml.KMeansModel)
self.assertIsInstance(model.centers, pd.DataFrame)
self.assertEqual(len(model.centers), 2)
self.assertItemsEqual(model.centers.columns.tolist(), self.mtcars.colnames)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
def testLinearRegression(self):
model = ml.linear_regression_gd(self.mtcars, 0.1, 0.1, 10)
self.assertIsInstance(model, ml.LinearRegressionModel)
self.assertIsInstance(model.weights, pd.DataFrame)
self.assertEqual(len(model.weights), 1)
self.assertEqual(len(model.weights.columns), self.mtcars.ncol)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol - 1)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
def testLogisticRegression(self):
model = ml.logistic_regression_gd(self.mtcars, 0.1, 10)
self.assertIsInstance(model, ml.LogisticRegressionModel)
self.assertIsInstance(model.weights, pd.DataFrame)
self.assertEqual(len(model.weights), 1)
self.assertEqual(len(model.weights.columns), self.mtcars.ncol)
self.assertIsInstance(model.predict(range(0, self.mtcars.ncol - 1)), float)
with self.assertRaises(Py4JJavaError):
model.predict([0, 1, 2])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mfjb/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
rxa254/VibrationsAndWaves | Simulations/plotInterference.py | 1 | 3479 | #!/usr/bin/env python
from __future__ import division
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 19 11:09:27 2016
https://github.com/michellejw/Interference_demo
@author: michw
Modified by rxa254, 21-Nov-2017
"""
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from timeit import default_timer as timer
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('Nsources', metavar='N', type=int, nargs='+',
help='the number of radiating sources')
args = parser.parse_args()
makeimage = 1
makevideo = 1
f = 5000 # frequency (Hz)
c = 1500 # sound speed (m/s)
l = c / f # wavelength (m)
dx = l * 15 # element separation (m)
n = args.Nsources[0] # number of elements
k = 2 * np.pi*f/c # wavenumber
# Set acoustic element coordinates
sy = np.zeros(n)
sx = np.arange(0, dx*n, dx)
# Construct background coordinates
res = 0.01
xmin = -3
xmax = np.max(sx)+3
#xmax = 3
ymin = -6
ymax = 0
xvals = np.arange(xmin, xmax, res)
yvals = np.arange(ymin, ymax, res)
gridx, gridy = np.meshgrid(xvals,yvals)
# Compute ranges from each element to each grid cell
ranges0 = []
for sdex in np.arange(n):
ranges0.append(np.sqrt(((sx[sdex]-gridx)**2) + ((sy[sdex]-gridy)**2)))
ranges = np.array(ranges0)
# Compute summed complex amplitude at each grid cell
ampsum = sum(np.exp(1j*(k*ranges))/ranges)
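# (Each element contributes a spherical wave exp(1j*k*r)/r sampled on the
#  grid; summing over the n sources produces the interference pattern.)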
ampsum = np.flipud(ampsum)
# Compute summed absolute amplitude at each grid cell
memap = 'inferno'
if makeimage:
plt.figure(121)
plt.clf()
plt.imshow(np.real(ampsum),
extent = [min(xvals),max(xvals),
min(yvals),max(yvals)],
vmin = -1, vmax = 1, cmap = memap)
plt.scatter(sx,sy,
s=5, c='k', edgecolor='black', linewidth=4, marker='s')
plt.autoscale(tight = True)
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel('horizontal distance [m]')
plt.ylabel('depth [m]')
# Save figure
filename = 'IMGintpattern_f' + str(np.round(f)) + '_n' + str(n) + '.png'
print "Saving Image..."
plt.savefig(filename)
print "Done."
if makevideo:
start = timer()
# Create a movie by looping through angular steps
FFMpegWriter = manimation.writers['ffmpeg']
metadata = dict(title='Movie Test', artist='Matplotlib',
comment='Get your popcorn!')
writer = FFMpegWriter(fps = 15, metadata=metadata)
tsteps = 50
T = np.linspace(0, 2*np.pi, tsteps)
T = T[0:-1]
reps = 10
T = np.tile(T, reps)
T = np.flipud(T)
#
fig = plt.figure(221)
moviename = 'intpattern_f' + str(np.round(f)) + '_n' + str(n) + '.mp4'
print("making the movie...")
with writer.saving(fig, moviename, 300):
for tdex in np.arange(len(T)):
ampsum2 = np.real(ampsum*np.exp(1j*T[tdex]))
plt.clf()
plt.imshow(ampsum2,
extent = [min(xvals),max(xvals),
min(yvals),max(yvals)],
vmin = -1, vmax = 1,cmap = memap)
plt.scatter(sx, sy,
s=120, c='w', edgecolor='black', linewidth=4)
plt.autoscale(tight = True)
plt.xlabel('horizontal distance [m]')
plt.ylabel('depth [m]')
writer.grab_frame()
end = timer()
print(str(round(end-start, 1)) + " sec to make the movie.")
| mit |
lispc/Paddle | python/paddle/utils/plotcurve.py | 18 | 5166 | #!/usr/bin/python
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plot training and testing curve from paddle log.
It takes input from a file or stdin, and output to a file or stdout.
Note: must have numpy and matplotlib installed in order to use this tool.
usage: Plot training and testing curves from paddle log file.
[-h] [-i INPUT] [-o OUTPUT] [--format FORMAT] [key [key ...]]
positional arguments:
key keys of scores to plot, the default will be AvgCost
optional arguments:
-h, --help show this help message and exit
-i INPUT, --input INPUT
input filename of paddle log, default will be standard
input
-o OUTPUT, --output OUTPUT
output filename of figure, default will be standard
output
--format FORMAT figure format(png|pdf|ps|eps|svg)
The keys must be in the order of paddle output(!!!).
For example, paddle.INFO contains the following log
I0406 21:26:21.325584 3832 Trainer.cpp:601] Pass=0 Batch=7771 AvgCost=0.624935 Eval: error=0.260972
To use this script to generate plot for AvgCost, error:
python plotcurve.py -i paddle.INFO -o figure.png AvgCost error
"""
import sys
import matplotlib
# the following line is added immediately after import matplotlib
# and before import pylot. The purpose is to ensure the plotting
# works even under remote login (i.e. headless display)
matplotlib.use('Agg')
from matplotlib import cm
import matplotlib.pyplot as pyplot
import numpy
import argparse
import re
import os
def plot_paddle_curve(keys, inputfile, outputfile, format='png',
show_fig=False):
"""Plot curves from paddle log and save to outputfile.
:param keys: a list of strings to be plotted, e.g. AvgCost
:param inputfile: a file object for input
:param outputfile: a file object for output
:return: None
"""
pass_pattern = r"Pass=([0-9]*)"
test_pattern = r"Test samples=([0-9]*)"
if not keys:
keys = ['AvgCost']
for k in keys:
pass_pattern += r".*?%s=([0-9e\-\.]*)" % k
test_pattern += r".*?%s=([0-9e\-\.]*)" % k
data = []
test_data = []
compiled_pattern = re.compile(pass_pattern)
compiled_test_pattern = re.compile(test_pattern)
for line in inputfile:
found = compiled_pattern.search(line)
found_test = compiled_test_pattern.search(line)
if found:
data.append([float(x) for x in found.groups()])
if found_test:
test_data.append([float(x) for x in found_test.groups()])
x = numpy.array(data)
x_test = numpy.array(test_data)
if x.shape[0] <= 0:
sys.stderr.write("No data to plot. Exiting!\n")
return
m = len(keys) + 1
for i in xrange(1, m):
pyplot.plot(
x[:, 0],
x[:, i],
color=cm.jet(1.0 * (i - 1) / (2 * m)),
label=keys[i - 1])
if (x_test.shape[0] > 0):
pyplot.plot(
x[:, 0],
x_test[:, i],
color=cm.jet(1.0 - 1.0 * (i - 1) / (2 * m)),
label="Test " + keys[i - 1])
pyplot.xlabel('number of epochs')
pyplot.legend(loc='best')
if show_fig:
pyplot.show()
pyplot.savefig(outputfile, format=format, bbox_inches='tight')
pyplot.clf()
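# Hedged usage sketch (editorial addition, not part of the original module):
# plot_paddle_curve can also be called directly from Python; the file names
# below are placeholders.
#
# with open('paddle.INFO') as log_file, open('figure.png', 'wb') as fig_file:
#     plot_paddle_curve(['AvgCost', 'error'], log_file, fig_file, format='png')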
def main(argv):
"""
main method of plotting curves.
"""
cmdparser = argparse.ArgumentParser(
"Plot training and testing curves from paddle log file.")
cmdparser.add_argument(
'key', nargs='*', help='keys of scores to plot, the default is AvgCost')
cmdparser.add_argument(
'-i',
'--input',
help='input filename of paddle log, '
'default will be standard input')
cmdparser.add_argument(
'-o',
'--output',
help='output filename of figure, '
'default will be standard output')
cmdparser.add_argument('--format', help='figure format(png|pdf|ps|eps|svg)')
args = cmdparser.parse_args(argv)
keys = args.key
if args.input:
inputfile = open(args.input)
else:
inputfile = sys.stdin
format = args.format
if args.output:
outputfile = open(args.output, 'wb')
if not format:
format = os.path.splitext(args.output)[1].lstrip('.')
if not format:
format = 'png'
else:
outputfile = sys.stdout
plot_paddle_curve(keys, inputfile, outputfile, format)
inputfile.close()
outputfile.close()
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
jpinedaf/pyspeckit | pyspeckit/spectrum/models/ammonia.py | 4 | 54171 | """
========================================
Ammonia inversion transition TROT fitter
========================================
Ammonia inversion transition TROT fitter translated from Erik Rosolowsky's
https://github.com/low-sky/nh3fit
.. moduleauthor:: Adam Ginsburg <[email protected]>
Module API
^^^^^^^^^^
"""
from __future__ import division
import numpy as np
from ...mpfit import mpfit
from ...spectrum.parinfo import ParinfoList,Parinfo
from . import fitter
from . import model
import matplotlib.cbook as mpcb
import copy
from astropy import log
from six import iteritems
from . import mpfit_messages
import operator
import string
import warnings
from .ammonia_constants import (line_names, freq_dict, aval_dict, ortho_dict,
voff_lines_dict, tau_wts_dict)
TCMB = 2.7315 # K
def ammonia(xarr, trot=20, tex=None, ntot=14, width=1, xoff_v=0.0, fortho=0.0,
tau=None, fillingfraction=None, return_tau=False,
return_tau_profile=False, background_tb=TCMB, verbose=False,
return_components=False, debug=False, line_names=line_names,
ignore_neg_models=False):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters. The returned model will be in Kelvin (brightness
temperature) units.
Note that astropy units are not used internally for performance reasons. A
wrapped version of this module including those units would be a good idea,
as it is definitely possible to implement this with unit support and good
performance.
Parameters
----------
xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
Array of wavelength/frequency values
trot: float
The rotational temperature of the lines. This is the excitation
temperature that governs the relative populations of the rotational
states.
tex: float or None
Excitation temperature. Assumed LTE if unspecified (``None``) or if
tex>trot. This is the excitation temperature for *all* of the modeled
lines, which means we are explicitly assuming T_ex is the same for all
lines.
ntot: float
Total log column density of NH3. Can be specified as a float in the
range 5-25
width: float
Line width (Gaussian sigma) in km/s
xoff_v: float
Line offset in km/s
fortho: float
Fraction of NH3 molecules in ortho state. Default assumes all para
(fortho=0).
tau: None or float
If tau (optical depth in the 1-1 line) is specified, ntot is NOT fit
but is set to a fixed value. The optical depths of the other lines are
fixed relative to tau_oneone
fillingfraction: None or float
fillingfraction is an arbitrary scaling factor to apply to the model
return_tau: bool
Return a dictionary of the optical depths in each line instead of a
synthetic spectrum
return_tau_profile: bool
Return a dictionary of the optical depth profiles in each line, i.e.,
the optical depths that will be used in conjunction with T_ex to produce
the synthetic spectrum
return_components: bool
Return a list of arrays, one for each hyperfine component, instead of
just one array
background_tb : float
The background brightness temperature. Defaults to TCMB.
ignore_neg_models: bool
Normally if background=TCMB and the model is negative, an exception
will be raised. This parameter will simply skip that exception. Use
with extreme caution: negative models (absorption spectra against the
CMB) are not physical! You may want to allow this in some cases
because there can be numerical issues where the model goes negative
when it shouldn't.
verbose: bool
More messages
debug: bool
For debugging.
Returns
-------
spectrum: `numpy.ndarray`
Synthetic spectrum with same shape as ``xarr``
component_list: list
List of `numpy.ndarray`'s, one for each hyperfine component
tau_dict: dict
Dictionary of optical depth values for the various lines
(if ``return_tau`` is set)
"""
from .ammonia_constants import (ckms, ccms, h, kb,
Jortho, Jpara, Brot, Crot)
# Convert X-units to frequency in GHz
if xarr.unit.to_string() != 'GHz':
xarr = xarr.as_unit('GHz')
if tex is None:
log.warning("Assuming tex=trot")
tex = trot
elif isinstance(tex, dict):
for k in tex:
assert k in line_names,"{0} not in line list".format(k)
line_names = tex.keys()
elif tex > trot:
warnings.warn("tex > trot in the ammonia model. "
"This is unphysical and "
"suggests that you may need to constrain tex. See "
"ammonia_model_restricted_tex.")
if width < 0:
return np.zeros(xarr.size)*np.nan
elif width == 0:
return np.zeros(xarr.size)
from .ammonia_constants import line_name_indices, line_names as original_line_names
# recreate line_names keeping only lines with a specified tex
# using this loop instead of tex.keys() preserves the order & data type
line_names = [k for k in original_line_names if k in line_names]
if 5 <= ntot <= 25:
# allow ntot to be specified as a logarithm. This is
# safe because ntot < 1e10 gives a spectrum of all zeros, and the
# plausible range of columns is not outside the specified range
lin_ntot = 10**ntot
else:
raise ValueError("ntot, the logarithmic total column density,"
" must be in the range 5 - 25")
tau_dict = {}
"""
Column density is the free parameter. It is used in conjunction with
the full partition function to compute the optical depth in each band
"""
Zpara = (2*Jpara+1)*np.exp(-h*(Brot*Jpara*(Jpara+1)+
(Crot-Brot)*Jpara**2)/(kb*trot))
Zortho = 2*(2*Jortho+1)*np.exp(-h*(Brot*Jortho*(Jortho+1)+
(Crot-Brot)*Jortho**2)/(kb*trot))
Qpara = Zpara.sum()
Qortho = Zortho.sum()
log.debug("Partition Function: Q_ortho={0}, Q_para={1}".format(Qortho, Qpara))
for linename in line_names:
if ortho_dict[linename]:
# define variable "ortho_or_para_frac" that will be the ortho
# fraction in the case of an ortho transition or the para
# fraction for a para transition
ortho_or_parafrac = fortho
Z = Zortho
Qtot = Qortho
else:
ortho_or_parafrac = 1.0-fortho
Z = Zpara
Qtot = Qpara
# for a complete discussion of these equations, please see
# https://github.com/keflavich/pyspeckit/blob/ammonia_equations/examples/AmmoniaLevelPopulation.ipynb
# https://github.com/pyspeckit/pyspeckit/blob/master/examples/AmmoniaLevelPopulation.ipynb
# and
# http://low-sky.github.io/ammoniacolumn/
# and
# https://github.com/pyspeckit/pyspeckit/pull/136
# short variable names for readability
frq = freq_dict[linename]
partition = Z[line_name_indices[linename]]
aval = aval_dict[linename]
# Total population of the higher energy inversion transition
population_rotstate = lin_ntot * ortho_or_parafrac * partition/Qtot
if isinstance(tex, dict):
expterm = ((1-np.exp(-h*frq/(kb*tex[linename]))) /
(1+np.exp(-h*frq/(kb*tex[linename]))))
else:
expterm = ((1-np.exp(-h*frq/(kb*tex))) /
(1+np.exp(-h*frq/(kb*tex))))
fracterm = (ccms**2 * aval / (8*np.pi*frq**2))
widthterm = (ckms/(width*frq*(2*np.pi)**0.5))
tau_i = population_rotstate * fracterm * expterm * widthterm
tau_dict[linename] = tau_i
log.debug("Line {0}: tau={1}, expterm={2}, pop={3},"
" partition={4}"
.format(linename, tau_i, expterm, population_rotstate,
partition))
# allow tau(11) to be specified instead of ntot
# in the thin case, this is not needed: ntot plays no role
# this process allows you to specify tau without using the approximate equations specified
# above. It should remove ntot from the calculations anyway...
if tau is not None:
tau11_temp = tau_dict['oneone']
# re-scale all optical depths so that tau is as specified, but the relative taus
# are set by the kinetic temperature and partition functions
for linename,t in iteritems(tau_dict):
tau_dict[linename] = t * tau/tau11_temp
if return_tau:
return tau_dict
model_spectrum = _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v,
fortho, line_names,
background_tb=background_tb,
fillingfraction=fillingfraction,
return_components=return_components,
return_tau_profile=return_tau_profile
)
if not return_tau_profile and model_spectrum.min() < 0 and background_tb == TCMB and not ignore_neg_models:
raise ValueError("Model dropped below zero. That is not possible "
" normally. Here are the input values: "+
("tex: {0} ".format(tex)) +
("trot: %f " % trot) +
("ntot: %f " % ntot) +
("width: %f " % width) +
("xoff_v: %f " % xoff_v) +
("fortho: %f " % fortho)
)
if verbose or debug:
log.info("trot: %g tex: %s ntot: %g width: %g xoff_v: %g "
"fortho: %g fillingfraction: %g" % (trot, tex, ntot, width,
xoff_v, fortho,
fillingfraction))
return model_spectrum
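# Hedged usage sketch (editorial addition; the SpectroscopicAxis API is assumed
# from pyspeckit and the frequency grid is illustrative only):
#
# from pyspeckit.spectrum.units import SpectroscopicAxis
# xarr = SpectroscopicAxis(np.linspace(23.688, 23.700, 2000), unit='GHz')
# spec = ammonia(xarr, trot=20, tex=6, ntot=14.5, width=0.5, xoff_v=0.0,
#                fortho=0.5)  # brightness temperature in K, same shape as xarr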
def cold_ammonia(xarr, tkin, **kwargs):
"""
Generate a model Ammonia spectrum based on input temperatures, column, and
gaussian parameters
Parameters
----------
xarr: `pyspeckit.spectrum.units.SpectroscopicAxis`
Array of wavelength/frequency values
tkin: float
The kinetic temperature of the lines in K. Will be converted to
rotational temperature following the scheme of Swift et al 2005
(http://esoads.eso.org/abs/2005ApJ...620..823S, eqn A6) and further
discussed in Equation 7 of Rosolowsky et al 2008
(http://adsabs.harvard.edu/abs/2008ApJS..175..509R)
"""
dT0 = 41.18 # Energy difference between (2,2) and (1,1) in K
trot = tkin * (1 + (tkin/dT0)*np.log(1 + 0.6*np.exp(-15.7/tkin)))**-1
log.debug("Cold ammonia turned T_K = {0} into T_rot = {1}".format(tkin,trot))
return ammonia(xarr, trot=trot, **kwargs)
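# Worked example of the Swift et al. conversion above (editorial addition,
# values rounded): for tkin = 15 K,
# trot = 15 / (1 + (15/41.18)*ln(1 + 0.6*exp(-15.7/15))) ~= 14.0 K,
# i.e. the rotational temperature sits slightly below the kinetic temperature.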
def ammonia_thin(xarr, tkin=20, tex=None, ntot=14, width=1, xoff_v=0.0,
fortho=0.0, tau=None, return_tau=False, **kwargs):
"""
Use optical depth in the 1-1 line as a free parameter
The optical depths of the other lines are then set by the kinetic
temperature
tkin is used to compute trot assuming a 3-level system consisting of (1,1),
(2,1), and (2,2) as in Swift et al, 2005 [2005ApJ...620..823S]
"""
tau_dict = {}
tex = tkin
dT0 = 41.5 # Energy diff between (2,2) and (1,1) in K
trot = tkin/(1+tkin/dT0*np.log(1+0.6*np.exp(-15.7/tkin)))
tau_dict['oneone'] = tau
tau_dict['twotwo'] = tau*(23.722/23.694)**2*4/3.*5/3.*np.exp(-41.5/trot)
tau_dict['threethree'] = tau*(23.8701279/23.694)**2*3/2.*14./3.*np.exp(-101.1/trot)
tau_dict['fourfour'] = tau*(24.1394169/23.694)**2*8/5.*9/3.*np.exp(-177.34/trot)
line_names = tau_dict.keys()
# TODO: Raise a warning if tkin > (some value), probably 50 K, because
# the 3-level system approximation used here will break down.
if return_tau:
return tau_dict
else:
return _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v, fortho,
line_names, **kwargs)
def _ammonia_spectrum(xarr, tex, tau_dict, width, xoff_v, fortho, line_names,
background_tb=TCMB, fillingfraction=None,
return_components=False, return_tau_profile=False):
"""
Helper function: given a dictionary of ammonia optical depths,
an excitation temperature, etc., produce the spectrum.
The default return units are brightness temperature in Kelvin. If
``return_tau_profile`` is specified, the returned "spectrum" will be
a spectrum of optical depths, not an intensity spectrum.
If ``return_components`` is specified, a list of spectra will be returned,
where each spectrum represents one of the hyperfine components of the
particular ammonia line being modeled.
"""
from .ammonia_constants import (ckms, h, kb)
# fillingfraction is an arbitrary scaling for the data
# The model will be (normal model) * fillingfraction
if fillingfraction is None:
fillingfraction = 1.0
# "runspec" means "running spectrum": it is accumulated over a loop
runspec = np.zeros(len(xarr))
if return_components:
components = []
if return_tau_profile:
tau_profile = {}
for linename in line_names:
voff_lines = np.array(voff_lines_dict[linename])
tau_wts = np.array(tau_wts_dict[linename])
lines = (1-voff_lines/ckms)*freq_dict[linename]/1e9
tau_wts = tau_wts / (tau_wts).sum()
nuwidth = np.abs(width/ckms*lines)
nuoff = xoff_v/ckms*lines
# tau array
tauprof = np.zeros(len(xarr))
for kk,nuo in enumerate(nuoff):
tauprof_ = (tau_dict[linename] * tau_wts[kk] *
np.exp(-(xarr.value+nuo-lines[kk])**2 /
(2.0*nuwidth[kk]**2)))
if return_components:
components.append(tauprof_)
tauprof += tauprof_
if return_tau_profile:
tau_profile[linename] = tauprof
T0 = (h*xarr.value*1e9/kb) # "temperature" of wavelength
if isinstance(tex, dict):
runspec = ((T0/(np.exp(T0/tex[linename])-1) -
T0/(np.exp(T0/background_tb)-1)) *
(1-np.exp(-tauprof)) * fillingfraction + runspec)
else:
runspec = ((T0/(np.exp(T0/tex)-1) -
T0/(np.exp(T0/background_tb)-1)) *
(1-np.exp(-tauprof)) * fillingfraction + runspec)
if return_components:
if isinstance(tex, dict):
term1 = [(T0/(np.exp(T0/tex[linename])-1)-T0/(np.exp(T0/background_tb)-1))
for linename in line_names]
else:
term1 = (T0/(np.exp(T0/tex)-1)-T0/(np.exp(T0/background_tb)-1))
return term1*(1-np.exp(-1*np.array(components)))
elif return_tau_profile:
return tau_profile
else:
return runspec
class ammonia_model(model.SpectralModel):
"""
The basic Ammonia (NH3) model with 6 free parameters:
Trot, Tex, ntot, width, xoff_v, and fortho
Trot is the rotational temperature. It governs the relative populations of
the rotational states, i.e., the relative strength of different transitions
Tex is the excitation temperature. It is assumed constant across all
states, which is not always a good assumption - a radiative transfer and
excitation model is required to constrain this, though.
ntot is the total column density of p-NH3 integrated over all states.
width is the linewidth (the Gaussian sigma)
xoff_v is the velocity offset / line of sight velocity
fortho is the ortho fraction (northo / (northo+npara))
"""
def __init__(self,npeaks=1,npars=6,
parnames=['trot','tex','ntot','width','xoff_v','fortho'],
**kwargs):
npeaks = self.npeaks = int(npeaks)
npars = self.npars = int(npars)
self._default_parnames = parnames
self.parnames = copy.copy(self._default_parnames)
# all fitters must have declared modelfuncs, which should take the fitted pars...
self.modelfunc = ammonia
self.n_modelfunc = self.n_ammonia
# for fitting ammonia simultaneously with a flat background
self.onepeakammonia = fitter.vheightmodel(ammonia)
#self.onepeakammoniafit = self._fourparfitter(self.onepeakammonia)
self.default_parinfo = None
self.default_parinfo, kwargs = self._make_parinfo(**kwargs)
# Remove keywords parsed by parinfo and ignored by the fitter
for kw in ('tied','partied'):
if kw in kwargs:
kwargs.pop(kw)
# enforce ammonia-specific parameter limits
for par in self.default_parinfo:
if 'tex' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],TCMB), par.limits[1])
if 'trot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],TCMB), par.limits[1])
if 'width' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
if 'fortho' in par.parname.lower():
par.limited = (True,True)
if par.limits[1] != 0:
par.limits = (max(par.limits[0],0), min(par.limits[1],1))
else:
par.limits = (max(par.limits[0],0), 1)
if 'ntot' in par.parname.lower():
par.limited = (True,par.limited[1])
par.limits = (max(par.limits[0],0), par.limits[1])
self.parinfo = copy.copy(self.default_parinfo)
self.modelfunc_kwargs = kwargs
# lower case? self.modelfunc_kwargs.update({'parnames':self.parinfo.parnames})
self.use_lmfit = kwargs.pop('use_lmfit') if 'use_lmfit' in kwargs else False
self.fitunit = 'GHz'
def __call__(self,*args,**kwargs):
return self.multinh3fit(*args,**kwargs)
def n_ammonia(self, pars=None, parnames=None, **kwargs):
"""
Returns a function that sums over N ammonia line profiles, where N is the length of
trot,tex,ntot,width,xoff_v,fortho *OR* N = len(pars) / 6
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
*pars* [ list ]
a list with len(pars) = (6-nfixed)n, assuming
trot,tex,ntot,width,xoff_v,fortho repeated
*parnames* [ list ]
len(parnames) must = len(pars). parnames determine how the ammonia
function parses the arguments
"""
npeaks = self.npeaks
npars = len(self.default_parinfo)
if hasattr(pars,'values'):
# important to treat as Dictionary, since lmfit params & parinfo both have .items
parnames,parvals = zip(*pars.items())
parnames = [p.lower() for p in parnames]
parvals = [p.value for p in parvals]
elif parnames is None:
parvals = pars
parnames = self.parnames
else:
parvals = pars
if len(pars) != len(parnames):
# this should only be needed when other codes are changing the number of peaks
# during a copy, as opposed to letting them be set by a __call__
# (n_modelfuncs = n_ammonia can be called directly)
# n_modelfuncs doesn't care how many peaks there are
if len(pars) % len(parnames) == 0:
parnames = [p for ii in range(len(pars)//len(parnames)) for p in parnames]
npeaks = int(len(parvals) / npars)
log.debug("Setting npeaks={0} npars={1}".format(npeaks, npars))
else:
raise ValueError("Wrong array lengths passed to n_ammonia!")
self._components = []
def L(x):
v = np.zeros(len(x))
for jj in range(int(npeaks)):
modelkwargs = kwargs.copy()
for ii in range(int(npars)):
name = parnames[ii+jj*int(npars)].strip('0123456789').lower()
modelkwargs.update({name:parvals[ii+jj*int(npars)]})
v += self.modelfunc(x,**modelkwargs)
return v
return L
def components(self, xarr, pars, hyperfine=False,
return_hyperfine_components=False, **kwargs):
"""
Ammonia components don't follow the default, since in Galactic
astronomy the hyperfine components should be well-separated.
If you want to see the individual components overlaid, you'll need to
pass hyperfine to the plot_fit call
"""
comps=[]
for ii in range(self.npeaks):
if hyperfine or return_hyperfine_components:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],
pars[ii*self.npars:(ii+1)*self.npars]))
comps.append(self.modelfunc(xarr, return_components=True,
**modelkwargs))
else:
modelkwargs = dict(zip(self.parnames[ii*self.npars:(ii+1)*self.npars],
pars[ii*self.npars:(ii+1)*self.npars]))
comps.append([self.modelfunc(xarr, return_components=False,
**modelkwargs)])
modelcomponents = np.concatenate(comps)
return modelcomponents
def multinh3fit(self, xax, data, err=None,
parinfo=None,
quiet=True, shh=True,
debug=False,
maxiter=200,
use_lmfit=False,
veryverbose=False, **kwargs):
"""
Fit multiple nh3 profiles (multiple can be 1)
Parameters
----------
xax : array
x axis
data : array
y axis
npeaks : int
How many nh3 profiles to fit? Default 1 (this could supersede onedgaussfit)
err : array
error corresponding to data
params : list
Fit parameters: [trot, tex, ntot (or tau), width, offset, ortho fraction] * npeaks
If len(params) % 6 == 0, npeaks will be set to len(params) / 6.
These parameters (and the related fixed, limited, min/max, names
below) need to have length = 6*npeaks. If npeaks > 1 and length =
6, they will be replicated npeaks times, otherwise they will be
reset to defaults:
fixed : list
Is parameter fixed?
limitedmin : list
minpars : list
set lower limits on each parameter (default: width>0, Tex and trot > Tcmb)
limitedmax : list
maxpars : list
set upper limits on each parameter
parnames : list
default parameter names, important for setting kwargs in model
['trot','tex','ntot','width','xoff_v','fortho']
quiet : bool
should MPFIT output each iteration?
shh : bool
output final parameters?
Returns
-------
mpp : model parameter object
Fit parameters
model : array
The model array
errors : array
the fit errors
chi2 : float
the chi^2 value of the fit
"""
if parinfo is None:
parinfo = self.parinfo = self.make_parinfo(**kwargs)
else:
if isinstance(parinfo, ParinfoList):
if not quiet:
log.info("Using user-specified parinfo.")
self.parinfo = parinfo
else:
if not quiet:
log.info("Using something like a user-specified parinfo, but not.")
self.parinfo = ParinfoList([p if isinstance(p,Parinfo) else Parinfo(p)
for p in parinfo],
preserve_order=True)
fitfun_kwargs = dict((x,y) for (x,y) in kwargs.items()
if x not in ('npeaks', 'params', 'parnames',
'fixed', 'limitedmin', 'limitedmax',
'minpars', 'maxpars', 'tied',
'max_tem_step'))
fitfun_kwargs.update(self.modelfunc_kwargs)
if 'use_lmfit' in fitfun_kwargs:
raise KeyError("use_lmfit was specified in a location where it "
"is unacceptable")
# not used: npars = len(parinfo)/self.npeaks
self._validate_parinfo()
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None):
return [0,(y-self.n_ammonia(pars=p,
parnames=parinfo.parnames,
**fitfun_kwargs)(x))]
else:
def f(p,fjac=None):
return [0,(y-self.n_ammonia(pars=p,
parnames=parinfo.parnames,
**fitfun_kwargs)(x))/err]
return f
if veryverbose:
log.info("GUESSES: ")
log.info(str(parinfo))
#log.info "\n".join(["%s: %s" % (p['parname'],p['value']) for p in parinfo])
if use_lmfit:
return self.lmfitter(xax, data, err=err,
parinfo=parinfo,
quiet=quiet,
debug=debug)
else:
mp = mpfit(mpfitfun(xax,data,err),
parinfo=parinfo,
maxiter=maxiter,
quiet=quiet,
debug=debug)
mpp = mp.params
if mp.perror is not None:
mpperr = mp.perror
else:
mpperr = mpp*0
chi2 = mp.fnorm
if mp.status == 0:
raise Exception(mp.errmsg)
for i,p in enumerate(mpp):
parinfo[i]['value'] = p
parinfo[i]['error'] = mpperr[i]
if not shh:
log.info("Fit status: {0}".format(mp.status))
log.info("Fit message: {0}".format(mpfit_messages[mp.status]))
log.info("Fit error message: {0}".format(mp.errmsg))
log.info("Final fit values: ")
for i,p in enumerate(mpp):
log.info(" ".join((parinfo[i]['parname'], str(p), " +/- ",
str(mpperr[i]))))
log.info(" ".join(("Chi2: ", str(mp.fnorm)," Reduced Chi2: ",
str(mp.fnorm/len(data)), " DOF:",
str(len(data)-len(mpp)))))
self.mp = mp
self.parinfo = parinfo
self.mpp = self.parinfo.values
self.mpperr = self.parinfo.errors
self.mppnames = self.parinfo.names
self.model = self.n_ammonia(pars=self.mpp, parnames=self.mppnames,
**fitfun_kwargs)(xax)
indiv_parinfo = [self.parinfo[jj*self.npars:(jj+1)*self.npars]
for jj in range(int(len(self.parinfo)/self.npars))]
modelkwargs = [dict([(p['parname'].strip("0123456789").lower(),
p['value']) for p in pi])
for pi in indiv_parinfo]
self.tau_list = [self.modelfunc(xax, return_tau=True,**mk)
for mk in modelkwargs]
return self.mpp,self.model,self.mpperr,chi2
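# Hedged usage note (editorial addition; the call pattern is assumed from
# standard pyspeckit conventions, not stated in this file): this fitter is
# usually reached through a Spectrum object rather than called directly, e.g.
#     sp.specfit(fittype='ammonia',
#                guesses=[20, 5, 14.5, 1.0, 0.0, 0.5])
# with guesses ordered as [trot, tex, ntot, width, xoff_v, fortho].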
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# trot, TEX, ntot, width, center, ortho fraction
return [20,10, 15, 1.0, 0.0, 1.0]
def annotations(self):
from decimal import Decimal # for formatting
tex_key = {'trot':'T_R', 'tkin': 'T_K', 'tex':'T_{ex}', 'ntot':'N',
'fortho':'F_o', 'width':'\\sigma', 'xoff_v':'v',
'fillingfraction':'FF', 'tau':'\\tau_{1-1}',
'background_tb':'T_{BG}', 'delta':'T_R-T_{ex}'}
# small hack below: don't quantize if error > value. We want to see the values.
label_list = []
for pinfo in self.parinfo:
parname = tex_key[pinfo['parname'].strip("0123456789").lower()]
parnum = int(pinfo['parname'][-1])
if pinfo['fixed']:
formatted_value = "%s" % pinfo['value']
pm = ""
formatted_error=""
else:
formatted_value = Decimal("%g" % pinfo['value']).quantize(Decimal("%0.2g" % (min(pinfo['error'],pinfo['value']))))
pm = "$\\pm$"
formatted_error = Decimal("%g" % pinfo['error']).quantize(Decimal("%0.2g" % pinfo['error']))
label = "$%s(%i)$=%8s %s %8s" % (parname, parnum, formatted_value, pm, formatted_error)
label_list.append(label)
labels = tuple(mpcb.flatten(label_list))
return labels
def make_parinfo(self, quiet=True,
npeaks=1,
params=(20,20,14,1.0,0.0,0.5), parnames=None,
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,True,False,False,True),
minpars=(TCMB,TCMB,5,0,0,0),
maxpars=(0,0,25,0,0,1),
tied=('',)*6,
max_tem_step=1.,
**kwargs
):
if not quiet:
log.info("Creating a 'parinfo' from guesses.")
self.npars = int(len(params) / npeaks)
if len(params) != npeaks and (len(params) / self.npars) > npeaks:
npeaks = len(params) / self.npars
npeaks = self.npeaks = int(npeaks)
if isinstance(params,np.ndarray):
params=params.tolist()
# this is actually a hack, even though it's decently elegant
# somehow, parnames was being changed WITHOUT being passed as a variable
# this doesn't make sense - at all - but it happened.
# (it is possible for self.parnames to have npars*npeaks elements where
# npeaks > 1 coming into this function even though only 6 pars are specified;
# _default_parnames is the workaround)
if parnames is None:
parnames = copy.copy(self._default_parnames)
partype_dict = dict(zip(['params', 'parnames', 'fixed',
'limitedmin', 'limitedmax', 'minpars',
'maxpars', 'tied'],
[params, parnames, fixed, limitedmin,
limitedmax, minpars, maxpars, tied]))
# make sure all various things are the right length; if they're
# not, fix them using the defaults
# (you can put in guesses of length 12 but leave the rest length 6;
# this code then doubles the length of everything else)
for partype,parlist in iteritems(partype_dict):
if len(parlist) != self.npars*self.npeaks:
# if you leave the defaults, or enter something that can be
# multiplied by npars to get to the right number of
# gaussians, it will just replicate
if len(parlist) == self.npars:
partype_dict[partype] *= npeaks
elif len(parlist) > self.npars:
# DANGER: THIS SHOULD NOT HAPPEN!
log.warning("WARNING! Input parameters were longer than allowed for variable {0}".format(parlist))
partype_dict[partype] = partype_dict[partype][:self.npars]
elif parlist==params: # this instance shouldn't really be possible
partype_dict[partype] = [20,20,1e10,1.0,0.0,0.5] * npeaks
elif parlist==fixed:
partype_dict[partype] = [False] * len(params)
elif parlist==limitedmax: # only fortho, fillingfraction have upper limits
partype_dict[partype] = (np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')
elif parlist==limitedmin: # no physical values can be negative except velocity
partype_dict[partype] = (np.array(parnames) != 'xoff_v')
elif parlist==minpars:
# all have minima of zero except kinetic temperature, which can't be below CMB.
# Excitation temperature technically can be, but not in this model
partype_dict[partype] = ((np.array(parnames) == 'trot') + (np.array(parnames) == 'tex')) * TCMB
elif parlist==maxpars: # fractions have upper limits of 1.0
partype_dict[partype] = ((np.array(parnames) == 'fortho') + (np.array(parnames) == 'fillingfraction')).astype('float')
elif parlist==parnames: # assumes the right number of parnames (essential)
partype_dict[partype] = list(parnames) * self.npeaks
elif parlist==tied:
partype_dict[partype] = [_increment_string_number(t, ii*self.npars)
for t in tied
for ii in range(self.npeaks)]
if len(parnames) != len(partype_dict['params']):
raise ValueError("Wrong array lengths AFTER fixing them")
# used in components. Is this just a hack?
self.parnames = partype_dict['parnames']
parinfo = [{'n':ii, 'value':partype_dict['params'][ii],
'limits':[partype_dict['minpars'][ii],partype_dict['maxpars'][ii]],
'limited':[partype_dict['limitedmin'][ii],partype_dict['limitedmax'][ii]], 'fixed':partype_dict['fixed'][ii],
'parname':partype_dict['parnames'][ii]+str(int(ii/int(self.npars))),
'tied':partype_dict['tied'][ii],
'mpmaxstep':max_tem_step*float(partype_dict['parnames'][ii] in ('tex','trot')), # must force small steps in temperature (True = 1.0)
'error': 0}
for ii in range(len(partype_dict['params']))
]
# hack: remove 'fixed' pars
#parinfo_with_fixed = parinfo
#parinfo = [p for p in parinfo_with_fixed if not p['fixed']]
#fixed_kwargs = dict((p['parname'].strip("0123456789").lower(),
# p['value'])
# for p in parinfo_with_fixed if p['fixed'])
# don't do this - it breaks the NEXT call because npars != len(parnames) self.parnames = [p['parname'] for p in parinfo]
# this is OK - not a permanent change
#parnames = [p['parname'] for p in parinfo]
# not OK self.npars = len(parinfo)/self.npeaks
parinfo = ParinfoList([Parinfo(p) for p in parinfo], preserve_order=True)
#import pdb; pdb.set_trace()
return parinfo
def _validate_parinfo(self,
must_be_limited={'trot': [True,False],
'tex': [False,False],
'ntot': [True, True],
'width': [True, False],
'xoff_v': [False, False],
'tau': [False, False],
'fortho': [True, True]},
required_limits={'trot': [0, None],
'ntot': [5, 25],
'width': [0, None],
'fortho': [0,1]}):
"""
Make sure the input parameters are all legitimate
"""
for par in self.parinfo:
limited = par.limited
parname = par.parname.strip(string.digits).lower()
mbl = must_be_limited[parname]
for a,b,ul in zip(limited, mbl, ('a lower','an upper')):
if b and not a:
raise ValueError("Parameter {0} must have {1} limit "
"but no such limit is set.".format(
parname, ul))
if parname in required_limits:
limits = par.limits
rlimits = required_limits[parname]
for a,b,op,ul in zip(limits, rlimits, (operator.lt,
operator.gt),
('a lower','an upper')):
if b is not None and op(a,b):
raise ValueError("Parameter {0} must have {1} limit "
"at least {2} but it is set to {3}."
.format(parname, ul, b, a))
def parse_3par_guesses(self, guesses):
"""
Try to convert a set of interactive guesses (peak, center, width) into
guesses appropriate to the model.
For NH3 models, we add in several extra parameters:
tex = 2.73 * peak
trot = tex * 2
fortho = 0.5
ntot = 15
ntot is set to a constant ~10^15 because this results in optical depths
near 1, so it forces the emission to be approximately significant.
trot > tex so that we're in a physical regime to begin with.
We assume tex = peak + 2.73 because most spectra are shown
background-subtracted (single dish are always that way, interferometric
data are intrinsically that way...) and otherwise the guessing will
crash if you guess a number < 2.73.
"""
gauss_npars = 3
if len(guesses) % gauss_npars != 0:
raise ValueError("Guesses passed to parse_3par_guesses must have "
"length % 3 == 0")
npeaks = len(guesses) // gauss_npars
npars = 6
new_guesses = [-1, -1, 15, -1, -1, 0.5] * npeaks
for ii in range(npeaks):
peak = guesses[ii * gauss_npars + 0]
center = guesses[ii * gauss_npars + 1]
width = guesses[ii * gauss_npars + 2]
new_guesses[ii*npars + 0] = (2.73+peak) * 2
new_guesses[ii*npars + 1] = (2.73+peak)
new_guesses[ii*npars + 3] = width
new_guesses[ii*npars + 4] = center
return new_guesses
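# Hedged illustration (editorial addition): a single interactive guess of
# (peak=1.0, center=0.0, width=0.5) maps to
# [trot, tex, ntot, width, xoff_v, fortho] = [7.46, 3.73, 15, 0.5, 0.0, 0.5],
# since tex = peak + 2.73 and trot = 2 * tex.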
class ammonia_model_vtau(ammonia_model):
def __init__(self,
parnames=['trot', 'tex', 'tau', 'width', 'xoff_v', 'fortho'],
**kwargs):
super(ammonia_model_vtau, self).__init__(parnames=parnames,
**kwargs)
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# trot, TEX, ntot, width, center, ortho fraction
return [20, 10, 10, 1.0, 0.0, 1.0]
def _validate_parinfo(self,
must_be_limited={'trot': [True,False],
'tex': [False,False],
'tau': [True, False],
'width': [True, False],
'xoff_v': [False, False],
'fortho': [True, True]},
required_limits={'trot': [0, None],
'tex': [None,None],
'width': [0, None],
'tau': [0, None],
'xoff_v': [None,None],
'fortho': [0,1]}):
supes = super(ammonia_model_vtau, self)
supes._validate_parinfo(must_be_limited=must_be_limited,
required_limits=required_limits)
return supes
def make_parinfo(self,
params=(20,14,0.5,1.0,0.0,0.5),
fixed=(False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True),
limitedmax=(False,False,False,False,False,True),
minpars=(TCMB,TCMB,0,0,0,0),
maxpars=(0,0,0,0,0,1),
tied=('',)*6,
**kwargs
):
"""
parnames=['trot', 'tex', 'tau', 'width', 'xoff_v', 'fortho']
"""
return super(ammonia_model_vtau, self).make_parinfo(params=params,
fixed=fixed,
limitedmax=limitedmax,
limitedmin=limitedmin,
minpars=minpars,
maxpars=maxpars,
tied=tied,
**kwargs)
class ammonia_model_vtau_thin(ammonia_model_vtau):
def __init__(self,parnames=['tkin', 'tau', 'width', 'xoff_v', 'fortho'],
**kwargs):
super(ammonia_model_vtau_thin, self).__init__(parnames=parnames,
npars=5,
**kwargs)
self.modelfunc = ammonia_thin
def _validate_parinfo(self,
must_be_limited={'tkin': [True,False],
'tex': [False,False],
'ntot': [True, True],
'width': [True, False],
'xoff_v': [False, False],
'tau': [False, False],
'fortho': [True, True]},
required_limits={'tkin': [0, None],
'ntot': [5, 25],
'width': [0, None],
'fortho': [0,1]}):
supes = super(ammonia_model_vtau_thin, self)
return supes._validate_parinfo(must_be_limited=must_be_limited,
required_limits=required_limits)
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# trot, tau, width, center, ortho fraction
return [20, 1, 1.0, 0.0, 1.0]
def __call__(self,*args,**kwargs):
return self.multinh3fit(*args, **kwargs)
def make_parinfo(self,
params=(20,14,1.0,0.0,0.5),
fixed=(False,False,False,False,False),
limitedmin=(True,True,True,False,True),
limitedmax=(False,False,False,False,True),
minpars=(TCMB,0,0,0,0),
maxpars=(0,0,0,0,1),
tied=('',)*5,
**kwargs
):
return super(ammonia_model_vtau_thin, self).make_parinfo(params=params,
fixed=fixed,
limitedmax=limitedmax,
limitedmin=limitedmin,
minpars=minpars,
maxpars=maxpars,
tied=tied,
**kwargs)
class ammonia_model_background(ammonia_model):
def __init__(self,**kwargs):
super(ammonia_model_background,self).__init__(npars=7,
parnames=['trot', 'tex',
'ntot',
'width',
'xoff_v',
'fortho',
'background_tb'])
def moments(self, Xax, data, negamp=None, veryverbose=False, **kwargs):
"""
Returns a very simple and likely incorrect guess
"""
# trot, TEX, ntot, width, center, ortho fraction
return [20,10, 10, 1.0, 0.0, 1.0, TCMB]
def make_parinfo(self, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5,TCMB), parnames=None,
fixed=(False,False,False,False,False,False,True),
limitedmin=(True,True,True,True,False,True,True),
limitedmax=(False,False,False,False,False,True,True),
minpars=(TCMB,TCMB,0,0,0,0,TCMB), parinfo=None,
maxpars=(0,0,0,0,0,1,TCMB),
tied=('',)*7,
quiet=True, shh=True,
veryverbose=False, **kwargs):
return super(ammonia_model_background,
self).make_parinfo(npeaks=npeaks, err=err, params=params,
parnames=parnames, fixed=fixed,
limitedmin=limitedmin,
limitedmax=limitedmax, minpars=minpars,
parinfo=parinfo, maxpars=maxpars,
tied=tied, quiet=quiet, shh=shh,
veryverbose=veryverbose, **kwargs)
def multinh3fit(self, xax, data, npeaks=1, err=None,
params=(20,20,14,1.0,0.0,0.5,TCMB), parnames=None,
fixed=(False,False,False,False,False,False,True),
limitedmin=(True,True,True,True,False,True,True),
limitedmax=(False,False,False,False,False,True,True),
minpars=(TCMB,TCMB,0,0,0,0,TCMB), parinfo=None,
maxpars=(0,0,0,0,0,1,TCMB),
tied=('',)*7,
quiet=True, shh=True,
veryverbose=False, **kwargs):
return super(ammonia_model_background,
self).multinh3fit(xax, data, npeaks=npeaks, err=err,
params=params, parnames=parnames,
fixed=fixed, limitedmin=limitedmin,
limitedmax=limitedmax, minpars=minpars,
parinfo=parinfo, maxpars=maxpars,
tied=tied, quiet=quiet, shh=shh,
veryverbose=veryverbose, **kwargs)
class cold_ammonia_model(ammonia_model):
def __init__(self,
parnames=['tkin', 'tex', 'ntot', 'width', 'xoff_v', 'fortho'],
**kwargs):
super(cold_ammonia_model, self).__init__(parnames=parnames, **kwargs)
self.modelfunc = cold_ammonia
def _validate_parinfo(self,
must_be_limited={'tkin': [True,False],
'tex': [False,False],
'ntot': [True, False],
'width': [True, False],
'xoff_v': [False, False],
'fortho': [True, True]},
required_limits={'tkin': [0, None],
'tex': [None,None],
'width': [0, None],
'ntot': [0, None],
'xoff_v': [None,None],
'fortho': [0,1]}):
supes = super(cold_ammonia_model, self)
return supes._validate_parinfo(must_be_limited=must_be_limited,
required_limits=required_limits)
class ammonia_model_restricted_tex(ammonia_model):
"""
Ammonia model with an explicitly restricted excitation temperature
such that tex <= trot, set by the "delta" parameter (tex = trot - delta)
with delta > 0. You can choose the ammonia function when you initialize
it (e.g., ``ammonia_model_restricted_tex(ammonia_func=ammonia)`` or
``ammonia_model_restricted_tex(ammonia_func=cold_ammonia)``)
"""
def __init__(self,
parnames=['trot', 'tex', 'ntot', 'width', 'xoff_v', 'fortho',
'delta'],
ammonia_func=ammonia,
**kwargs):
super(ammonia_model_restricted_tex, self).__init__(npars=7,
parnames=parnames,
**kwargs)
def ammonia_dtex(*args, **kwargs):
"""
Strip out the 'delta' keyword
"""
# for py2 compatibility, must strip out manually
delta = kwargs.pop('delta') if 'delta' in kwargs else None
np.testing.assert_allclose(kwargs['trot'] - kwargs['tex'],
delta)
return ammonia_func(*args, **kwargs)
self.modelfunc = ammonia_dtex
def n_ammonia(self, pars=None, parnames=None, **kwargs):
if parnames is not None:
for ii,pn in enumerate(parnames):
if ii % 7 == 1 and 'tex' not in pn:
raise ValueError('bad parameter names')
if ii % 7 == 6 and 'delta' not in pn:
raise ValueError('bad parameter names')
if pars is not None:
assert len(pars) % 7 == 0
for ii in range(int(len(pars)/7)):
try:
# Case A: they're param objects
# (setting the param directly can result in recursion errors)
pars[1+ii*7].value = pars[0+ii*7].value - pars[6+ii*7].value
except AttributeError:
# Case B: they're just lists of values
pars[1+ii*7] = pars[0+ii*7] - pars[6+ii*7]
supes = super(ammonia_model_restricted_tex, self)
return supes.n_ammonia(pars=pars, parnames=parnames, **kwargs)
def _validate_parinfo(self,
must_be_limited={'trot': [True,False],
'tex': [False,False],
'ntot': [True, False],
'width': [True, False],
'xoff_v': [False, False],
'fortho': [True, True],
'delta': [True, False],
},
required_limits={'trot': [0, None],
'tex': [None,None],
'width': [0, None],
'ntot': [0, None],
'xoff_v': [None,None],
'fortho': [0,1],
'delta': [0, None],
}):
supes = super(ammonia_model_restricted_tex, self)
return supes._validate_parinfo(must_be_limited=must_be_limited,
required_limits=required_limits)
def make_parinfo(self,
params=(20,20,0.5,1.0,0.0,0.5,0),
fixed=(False,False,False,False,False,False,False),
limitedmin=(True,True,True,True,False,True,True),
limitedmax=(False,False,False,False,False,True,False),
minpars=(TCMB,TCMB,0,0,0,0,0),
maxpars=(0,0,0,0,0,1,0),
tied=('','p[0]-p[6]','','','','',''),
**kwargs
):
"""
parnames=['trot', 'tex', 'ntot', 'width', 'xoff_v', 'fortho', 'delta']
'delta' is the difference between tex and trot
"""
supes = super(ammonia_model_restricted_tex, self)
return supes.make_parinfo(params=params, fixed=fixed,
limitedmax=limitedmax, limitedmin=limitedmin,
minpars=minpars, maxpars=maxpars, tied=tied,
**kwargs)
def _increment_string_number(st, count):
"""
Increment each bracketed parameter index in a string by ``count``
Expects input of the form: p[6] or p[0]-p[6]
"""
import re
dig = re.compile('[0-9]+')
if dig.search(st):
# increment each index separately so tied expressions with more than one
# parameter reference (e.g. 'p[0]-p[6]') are offset correctly per peak
result = dig.sub(lambda m: str(int(m.group()) + count), st)
return result
else:
return st
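# Hedged example (editorial addition, not original code):
# _increment_string_number('p[6]', 7) -> 'p[13]'; make_parinfo uses this to
# offset per-peak 'tied' expressions such as 'p[0]-p[6]' for each extra peak.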
| mit |
dfst/deepforge | src/plugins/GenerateJob/templates/backend_deepforge.py | 1 | 10781 | """
This is a fully functional do nothing backend to provide a template to
backend writers. It is fully functional in that you can select it as
a backend with
import matplotlib
matplotlib.use('Template')
and your matplotlib scripts will (should!) run without error, though
no output is produced. This provides a nice starting point for
backend writers because you can selectively implement methods
(draw_rectangle, draw_lines, etc...) and slowly see your figure come
to life w/o having to have a full blown implementation before getting
any results.
Copy this to backend_xxx.py and replace all instances of 'template'
with 'xxx'. Then implement the class methods and functions below, and
add 'xxx' to the switchyard in matplotlib/backends/__init__.py and
'xxx' to the backends list in the validate_backend method in
matplotlib/__init__.py and you're off. You can use your backend with::
import matplotlib
matplotlib.use('xxx')
from pylab import *
plot([1,2,3])
show()
matplotlib also supports external backends, so you can use any module
in your PYTHONPATH with the syntax::
import matplotlib
matplotlib.use('module://my_backend')
where my_backend.py is your module name. This syntax is also
recognized in the rc file and in the -d argument in pylab, e.g.,::
python simple_plot.py -dmodule://my_backend
If your backend implements support for saving figures (i.e. has a print_xyz()
method) you can register it as the default handler for a given file type
from matplotlib.backend_bases import register_backend
register_backend('xyz', 'my_backend', 'XYZ File Format')
...
plt.savefig("figure.xyz")
The files that are most relevant to backend_writers are
matplotlib/backends/backend_your_backend.py
matplotlib/backend_bases.py
matplotlib/backends/__init__.py
matplotlib/__init__.py
matplotlib/_pylab_helpers.py
Naming Conventions
* classes Upper or MixedUpperCase
* variables lower or lowerUpper
* functions lower or underscore_separated
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
FigureCanvasBase, FigureManagerBase, GraphicsContextBase, RendererBase)
from matplotlib.figure import Figure
import simplejson as json
class RendererTemplate(RendererBase):
"""
The renderer handles drawing/rendering operations.
This is a minimal do-nothing class that can be used to get started when
writing a new backend. Refer to backend_bases.RendererBase for
documentation of the class's methods.
"""
def __init__(self, dpi):
self.dpi = dpi
def draw_path(self, gc, path, transform, rgbFace=None):
pass
# draw_markers is optional, and we get more correct relative
# timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_markers(self, gc, marker_path, marker_trans, path, trans,
# rgbFace=None):
# pass
# draw_path_collection is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_path_collection(self, gc, master_transform, paths,
# all_transforms, offsets, offsetTrans,
# facecolors, edgecolors, linewidths, linestyles,
# antialiaseds):
# pass
# draw_quad_mesh is optional, and we get more correct
# relative timings by leaving it out. backend implementers concerned with
# performance will probably want to implement it
# def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
# coordinates, offsets, offsetTrans, facecolors,
# antialiased, edgecolors):
# pass
def draw_image(self, gc, x, y, im):
pass
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
pass
def flipy(self):
return True
def get_canvas_width_height(self):
return 100, 100
def get_text_width_height_descent(self, s, prop, ismath):
return 1, 1, 1
def new_gc(self):
return GraphicsContextTemplate()
def points_to_pixels(self, points):
# if backend doesn't have dpi, e.g., postscript or svg
return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
#return points/72.0 * self.dpi.get()
class GraphicsContextTemplate(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overridden if drawing should be done in
interactive python mode
"""
def show(block=None):
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
manager.canvas.send_deepforge_update()
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# May be implemented via the `_new_figure_manager_template` helper.
# If a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to do it -- see
# backend_wx, backend_wxagg and backend_tkagg for examples. Not all GUIs
# require explicit instantiation of a main-level app (e.g., backend_gtk,
# backend_gtkagg) for pylab.
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
# May be implemented via the `_new_figure_manager_template` helper.
canvas = FigureCanvasTemplate(figure)
manager = FigureManagerTemplate(canvas, num)
return manager
class FigureCanvasTemplate(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Note GUI templates will want to connect events for button presses,
mouse movements and key presses to functions that call the base
class methods button_press_event, button_release_event,
motion_notify_event, key_press_event, and key_release_event. See,
e.g., backend_gtk.py, backend_wx.py and backend_tkagg.py
Attributes
----------
figure : `matplotlib.figure.Figure`
A high-level Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
self.send_deepforge_update()
renderer = RendererTemplate(self.figure.dpi)
self.figure.draw(renderer)
def send_deepforge_update(self):
state = self.figure_to_state()
# Probably should do some diff-ing if the state hasn't changed...
# TODO
print('deepforge-cmd PLOT ' + json.dumps(state, ignore_nan=True))
def figure_to_state(self):
figure = self.figure
state = {}
state['id'] = self.manager.num
#state['title'] = self.manager.num
state['axes'] = []
# Get the data points
for axes in figure.get_axes():
axes_data = {}
axes_data['title'] = axes.get_title()
axes_data['xlabel'] = axes.get_xlabel()
axes_data['ylabel'] = axes.get_ylabel()
axes_data['lines'] = []
for i, line in enumerate(axes.lines):
lineDict = {}
lineDict['points'] = line.get_xydata().tolist()
lineDict['label'] = ''
default_label = ('_line' + str(i))
if line.get_label() != default_label:
lineDict['label'] = line.get_label()
axes_data['lines'].append(lineDict)
state['axes'].append(axes_data)
return state
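# Hedged illustration of the state payload built above (keys taken from the
# code; the values are made up):
# {'id': 1,
#  'axes': [{'title': '', 'xlabel': 'x', 'ylabel': 'y',
#            'lines': [{'points': [[0.0, 1.0], [1.0, 2.0]], 'label': 'demo'}]}]}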
# You should provide a print_xxx function for every file format
# you can write.
# If the file type is not in the base set of filetypes,
# you should add it to the class-scope filetypes dictionary as follows:
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['foo'] = 'My magic Foo format'
def print_foo(self, filename, *args, **kwargs):
"""
Write out format foo. The dpi, facecolor and edgecolor are restored
to their original values after this call, so you don't need to
save and restore them.
"""
pass
def get_default_filetype(self):
return 'foo'
class FigureManagerTemplate(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasTemplate
FigureManager = FigureManagerTemplate
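# Hedged usage sketch (editorial addition; the module path is an assumption
# based on where deepforge places this template):
#
# import matplotlib
# matplotlib.use('module://backend_deepforge')
# import matplotlib.pyplot as plt
# plt.plot([1, 2, 3], label='demo')
# plt.show()  # prints a 'deepforge-cmd PLOT <json>' line describing the figure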
| apache-2.0 |
arvinsahni/ml4 | flask/app/viz_1.py | 2 | 3249 | from __future__ import division
from flask import render_template, request, Response, jsonify, send_from_directory
from app import app
import json
import psycopg2
import os
import sys
import psycopg2.extras
import pandas as pd
module_path = os.path.abspath(os.path.join('../'))
if module_path not in sys.path:
sys.path.append(module_path)
from learn import forall as fa
from learn import utils
@app.route('/index')
def index():
return render_template('home.html')
@app.route('/viz')
def viz():
return render_template('viz.html')
def to_csv(d, fields):
d.insert(0, fields)
return Response('\n'.join([",".join(map(str, e)) for e in d]), mimetype='text/csv')
@app.route('/hist_data', methods=['GET', 'POST'])
def hist_data():
website = request.args.get('website')
person = request.args.get('person')
db = psycopg2.connect(host='ec2-54-208-219-223.compute-1.amazonaws.com',
database='election',
user='elections',
password='election2016')
curs = db.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
'DEC2FLOAT',
lambda value, curs: float(value) if value is not None else None)
psycopg2.extensions.register_type(DEC2FLOAT)
if website:
sql = """select a.bin, sum(coalesce(count,0)) from histogram_bins a
left join (select * from data_binned where website = '%s' and person = '%s') b on a.bin = b.bin
group by 1 order by 1""" % (website, person)
else:
sql = """select a.bin, sum(coalesce(count,0)) from histogram_bins a
left join (select * from data_binned where person = '%s') b on a.bin = b.bin
group by 1 order by 1""" % person
print(sql)
curs.execute(sql)
d = curs.fetchall()
print(d)
fields = ('bin', 'sum')
return jsonify(data=d)
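# Hedged usage sketch (editorial addition; host and query values are
# placeholders):
# import requests
# resp = requests.get('http://localhost:5000/hist_data',
#                     params={'website': 'example.com', 'person': 'some_name'})
# bins = resp.json()['data']  # list of {'bin': ..., 'sum': ...} rows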
@app.route('/dataset', methods=['POST'])
def dataset():
# print(request.get_data())
print(request.files)
dtrain = request.files['train']
dtest = request.files['test']
#Save input data files in input folder
dtrain.save("input/" + dtrain.filename)
dtest.save("input/" + dtest.filename)
df_train = pd.read_csv("input/" + dtrain.filename)
# print(df_train.head())
df_test = pd.read_csv("input/" + dtest.filename)
# print(df_test.head())
#From Jason's ML module
X, y = utils.X_y_split(X_train=df_train, X_test=df_test)
model = fa.All()
model.fit(X, y)
#Append prediction column to test set
predictions = model.predict(df_test)
df_test['prediction'] = predictions
#Save prediction in output folder
print(df_test.head())
df_test.to_csv("output/" + "prediction.csv", index=False)
print("%s: %.3f (%s)" % ("Jacky's data:", model.score, model.score_type))
return '{ "fake_json":100}', 200
@app.route('/download')
def download(filename=None):
# uploads = os.path.join(current_app.root_path, app.config['UPLOAD_FOLDER'])
return send_from_directory(directory=os.path.abspath(os.path.join('../flask/output')), filename="prediction.csv")
# return '{ "fake_json":100}'
| mit |
rvraghav93/scikit-learn | examples/manifold/plot_compare_methods.py | 52 | 3878 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
pgroth/independence-indicators | Temporal-Coauthor-Networks/vincent/examples/map_examples.py | 11 | 6721 | # -*- coding: utf-8 -*-
"""
Vincent Map Examples
"""
#Build a map from scratch
from vincent import *
world_topo = r'world-countries.topo.json'
state_topo = r'us_states.topo.json'
lake_topo = r'lakes_50m.topo.json'
county_geo = r'us_counties.geo.json'
county_topo = r'us_counties.topo.json'
or_topo = r'or_counties.topo.json'
vis = Visualization(width=960, height=500)
vis.data['countries'] = Data(
name='countries',
url=world_topo,
format={'type': 'topojson', 'feature': 'world-countries'}
)
geo_transform = Transform(
type='geopath', value="data", projection='winkel3', scale=200,
translate=[480, 250]
)
geo_from = MarkRef(data='countries', transform=[geo_transform])
enter_props = PropertySet(
stroke=ValueRef(value='#000000'),
path=ValueRef(field='path')
)
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark_props = MarkProperties(enter=enter_props, update=update_props)
vis.marks.append(
Mark(type='path', from_=geo_from, properties=mark_props)
)
vis.to_json('vega.json')
#Convenience Method
geo_data = [{'name': 'countries',
'url': world_topo,
'feature': 'world-countries'}]
vis = Map(geo_data=geo_data, scale=200)
vis.to_json('vega.json')
#States & Counties
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'},
{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}
]
vis = Map(geo_data=geo_data, scale=1000, projection='albersUsa')
del vis.marks[1].properties.update
vis.marks[0].properties.update.fill.value = '#084081'
vis.marks[1].properties.enter.stroke.value = '#fff'
vis.marks[0].properties.enter.stroke.value = '#7bccc4'
vis.to_json('vega.json')
#Choropleth
import json
import pandas as pd
#Map the county codes we have in our geometry to those in the
#county_data file, which contains additional rows we don't need
with open('us_counties.topo.json', 'r') as f:
get_id = json.load(f)
#A little FIPS code munging
new_geoms = []
for geom in get_id['objects']['us_counties.geo']['geometries']:
geom['properties']['FIPS'] = int(geom['properties']['FIPS'])
new_geoms.append(geom)
get_id['objects']['us_counties.geo']['geometries'] = new_geoms
with open('us_counties.topo.json', 'w') as f:
json.dump(get_id, f)
#Grab the FIPS codes and load them into a dataframe
geometries = get_id['objects']['us_counties.geo']['geometries']
county_codes = [x['properties']['FIPS'] for x in geometries]
county_df = pd.DataFrame({'FIPS': county_codes}, dtype=str)
county_df = county_df.astype(int)
#Read into Dataframe, cast to int for consistency
df = pd.read_csv('data/us_county_data.csv', na_values=[' '])
df['FIPS'] = df['FIPS'].astype(int)
#Perform an inner join, then forward-fill NA's from the preceding row
merged = pd.merge(df, county_df, on='FIPS', how='inner')
merged = merged.fillna(method='pad')
geo_data = [{'name': 'counties',
'url': county_topo,
'feature': 'us_counties.geo'}]
vis = Map(data=merged, geo_data=geo_data, scale=1100, projection='albersUsa',
data_bind='Employed_2011', data_key='FIPS',
map_key={'counties': 'properties.FIPS'})
vis.marks[0].properties.enter.stroke_opacity = ValueRef(value=0.5)
#Change our domain to an even integer
vis.scales['color'].domain = [0, 189000]
vis.legend(title='Number Employed 2011')
vis.to_json('vega.json')
#Let's look at different stats
vis.rebind(column='Civilian_labor_force_2011', brew='BuPu')
vis.to_json('vega.json')
vis.rebind(column='Unemployed_2011', brew='PuBu')
vis.to_json('vega.json')
vis.rebind(column='Unemployment_rate_2011', brew='YlGnBu')
vis.to_json('vega.json')
vis.rebind(column='Median_Household_Income_2011', brew='RdPu')
vis.to_json('vega.json')
#Mapping US State Level Data
state_data = pd.read_csv('data/US_Unemployment_Oct2012.csv')
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=state_data, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='Unemployment', data_key='NAME',
map_key={'states': 'properties.NAME'})
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Iterating State Level Data
yoy = pd.read_table('data/State_Unemp_YoY.txt', delim_whitespace=True)
#Standardize State names to match TopoJSON for keying
names = []
for row in yoy.iterrows():
pieces = row[1]['NAME'].split('_')
together = ' '.join(pieces)
names.append(together.title())
yoy['NAME'] = names
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'}]
vis = Map(data=yoy, geo_data=geo_data, scale=1000,
projection='albersUsa', data_bind='AUG_2012', data_key='NAME',
map_key={'states': 'properties.NAME'}, brew='YlGnBu')
#Custom threshold scale
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.legend(title='Unemployment (%)')
vis.to_json('vega.json')
#Rebind and set our scale again
vis.rebind(column='AUG_2013', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [0, 2, 4, 6, 8, 10, 12]
vis.to_json('vega.json')
vis.rebind(column='CHANGE', brew='YlGnBu')
vis.scales[0].type='threshold'
vis.scales[0].domain = [-1.5, -1.3, -1.1, 0, 0.1, 0.3, 0.5, 0.8]
vis.legends[0].title = "YoY Change in Unemployment (%)"
vis.to_json('vega.json')
#Oregon County-level population data
or_data = pd.read_table('data/OR_County_Data.txt', delim_whitespace=True)
or_data['July_2012_Pop']= or_data['July_2012_Pop'].astype(int)
#Standardize keys
with open('or_counties.topo.json', 'r') as f:
counties = json.load(f)
def split_county(name):
parts = name.split(' ')
parts.pop(-1)
return ''.join(parts).upper()
#A little FIPS code munging
new_geoms = []
for geom in counties['objects']['or_counties.geo']['geometries']:
geom['properties']['COUNTY'] = split_county(geom['properties']['COUNTY'])
new_geoms.append(geom)
counties['objects']['or_counties.geo']['geometries'] = new_geoms
with open('or_counties.topo.json', 'w') as f:
json.dump(counties, f)
geo_data = [{'name': 'states',
'url': state_topo,
'feature': 'us_states.geo'},
{'name': 'or_counties',
'url': or_topo,
'feature': 'or_counties.geo'}]
vis = Map(data=or_data, geo_data=geo_data, scale=3700,
translate=[1480, 830],
projection='albersUsa', data_bind='July_2012_Pop', data_key='NAME',
map_key={'or_counties': 'properties.COUNTY'})
vis.marks[0].properties.update.fill.value = '#c2c2c2'
vis.to_json('vega.json')
| gpl-2.0 |
maahn/pyOptimalEstimation | pyOptimalEstimation/tests/test_pyoe.py | 1 | 7099 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pn
import scipy
from copy import deepcopy
import pyOptimalEstimation as pyOE
def forward_simple(X):
z = np.linspace(1, 99, 50)
N = X['N']
R = X['R']
W = X['W']
F = N * np.exp(-((z-R)/W)**2)
return F
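# Minimal sketch of how the forward model is driven (illustration only; the
# state values below are assumptions and this helper is not used by the tests).
def _forward_simple_example():
    x = pn.Series([300., 60., 10.], index=['N', 'R', 'W'])
    F = forward_simple(x)  # Gaussian profile evaluated on 50 height levels
    assert F.shape == (50,)
    return F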
class TestFullRetrieval(object):
def test_simple(self):
x_vars = ['N', 'R', 'W']
x_truth = pn.Series([300., 60., 10.], index=x_vars)
x_a = pn.Series([200., 50., 15.], index=x_vars)
x_cov = pn.DataFrame([[200.**2, 0., 0.],
[0., 50.**2, 0.],
[0., 0., 5.**2]],
index=x_vars,
columns=x_vars,
)
y_vars = ['z%02i' % i for i in range(50)]
y_cov = pn.DataFrame(np.identity(50) * 100**2,
index=y_vars,
columns=y_vars)
np.random.seed(1)
y_obs = forward_simple(x_truth) + \
np.random.normal(loc=0, scale=100, size=50)
oe = pyOE.optimalEstimation(
x_vars,
x_a,
x_cov,
y_vars,
y_obs,
y_cov,
forward_simple,
forwardKwArgs={},
x_truth=x_truth,
)
oe.doRetrieval()
chi2passed, chi2value, chi2critical = oe.chiSquareTest()
linearity, trueLinearityChi2, trueLinearityChi2Critical = oe.linearityTest()
print('np', np.__version__)
print('pn', pn.__version__)
print('pyOE', pyOE.__version__)
print('scipy', scipy.__version__)
assert np.all(np.isclose(oe.x_op.values, np.array(
[230.75639479, 58.49351178, 12.32118448])))
assert np.all(np.isclose(oe.x_op_err.values, np.array(
[42.940902, 2.05667214, 2.4442318])))
assert np.all(
np.isclose(
oe.y_op.values,
np.array(
[8.07132255e-08, 3.57601384e-07,
1.50303019e-06, 5.99308237e-06,
2.26697545e-05, 8.13499728e-05,
2.76937671e-04, 8.94377132e-04,
2.74014386e-03, 7.96416170e-03,
2.19594168e-02, 5.74401494e-02,
1.42535928e-01, 3.35542210e-01,
7.49348775e-01, 1.58757726e+00,
3.19080133e+00, 6.08385266e+00,
1.10045335e+01, 1.88833313e+01,
3.07396995e+01, 4.74716852e+01,
6.95478498e+01, 9.66600005e+01,
1.27445324e+02, 1.59409810e+02,
1.89156040e+02, 2.12931252e+02,
2.27390663e+02, 2.30366793e+02,
2.21401803e+02, 2.01862876e+02,
1.74600622e+02, 1.43267982e+02,
1.11523535e+02, 8.23565110e+01,
5.76956909e+01, 3.83444793e+01,
2.41755486e+01, 1.44598524e+01,
8.20475101e+00, 4.41652793e+00,
2.25533253e+00, 1.09258243e+00,
5.02125028e-01, 2.18919049e-01,
9.05459987e-02, 3.55278561e-02,
1.32246067e-02, 4.66993202e-03]
)
)
)
assert np.isclose(oe.dgf, (2.7132392503933556))
assert np.isclose(oe.trueLinearity, 0.41529831393972894)
assert np.all(np.array(oe.linearity) < 1)
assert np.all(chi2passed)
def test_simple_withB(self):
x_vars = ['N', 'R']
b_vars = ['W']
x_truth = pn.Series([300., 60.], index=x_vars)
x_a = pn.Series([200., 50.], index=x_vars)
x_cov = pn.DataFrame([[200.**2, 0.],
[0., 50.**2]],
index=x_vars,
columns=x_vars,
)
b_vars = ['W']
b_param = pn.Series(15, index=b_vars)
b_cov = pn.DataFrame([[5**2]], index=b_vars, columns=b_vars)
y_vars = ['z%02i' % i for i in range(50)]
y_cov = pn.DataFrame(np.identity(50) * 100**2,
index=y_vars,
columns=y_vars)
np.random.seed(1)
y_obs = forward_simple(pn.concat((x_truth, b_param))) + \
np.random.normal(loc=0, scale=100, size=50)
oe = pyOE.optimalEstimation(
x_vars,
x_a,
x_cov,
y_vars,
y_obs,
y_cov,
forward_simple,
forwardKwArgs={},
x_truth=x_truth,
b_vars=b_vars,
b_p=b_param,
S_b=b_cov,
)
oe.doRetrieval()
chi2passed, chi2value, chi2critical = oe.chiSquareTest()
linearity, trueLinearityChi2, trueLinearityChi2Critical = oe.linearityTest()
print('np', np.__version__)
print('pn', pn.__version__)
print('pyOE', pyOE.__version__)
print('scipy', scipy.__version__)
assert np.all(np.isclose(oe.x_op.values, np.array(
[255.30992222, 58.68130862])))
assert np.all(np.isclose(oe.x_op_err.values, np.array(
[38.59979542, 2.0071929])))
assert np.all(
np.isclose(
oe.y_op.values,
np.array([9.66145500e-05, 2.64647052e-04,
6.99600329e-04, 1.78480744e-03,
4.39431462e-03, 1.04411743e-02,
2.39423053e-02, 5.29835436e-02,
1.13155184e-01, 2.33220288e-01,
4.63891718e-01, 8.90482377e-01,
1.64965245e+00, 2.94929357e+00,
5.08864283e+00, 8.47313942e+00,
1.36158624e+01, 2.11156461e+01,
3.16025417e+01, 4.56455107e+01,
6.36256959e+01, 8.55904756e+01,
1.11116039e+02, 1.39215145e+02,
1.68327329e+02, 1.96417962e+02,
2.21190355e+02, 2.40386232e+02,
2.52122389e+02, 2.55194702e+02,
2.49281652e+02, 2.34999746e+02,
2.13797631e+02, 1.87714060e+02,
1.59055663e+02, 1.30064833e+02,
1.02642934e+02, 7.81729764e+01,
5.74569615e+01, 4.07555802e+01,
2.78990827e+01, 1.84310971e+01,
1.17508929e+01, 7.23017763e+00,
4.29324314e+00, 2.46025669e+00,
1.36061037e+00, 7.26182114e-01,
3.74038011e-01, 1.85927808e-01])
)
)
assert np.isclose(oe.dgf, 1.9611398655015124)
assert np.isclose(oe.trueLinearity, 0.039634853402863594)
assert np.all(np.array(oe.linearity) < 1)
assert np.all(chi2passed)
| gpl-3.0 |
hpi-xnor/BMXNet | example/kaggle-ndsb1/gen_img_list.py | 42 | 7000 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='generate train/test image list files from input directory. If training, it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
help='the output lst file')
parser.add_argument('--train', action='store_true',
help='if we are generating training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
random.seed(888)
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
tr_fo_name=os.path.join(args.out_folder+"tr.lst")
va_fo_name=os.path.join(args.out_folder+"va.lst")
tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
img_lst = []
cnt = 0
if args.train:
for i in xrange(len(head)):
path = args.image_folder + head[i]
lst = os.listdir(args.image_folder + head[i])
for img in lst:
img_lst.append((cnt, i, path + '/' + img))
cnt += 1
else:
lst = os.listdir(args.image_folder)
for img in lst:
img_lst.append((cnt, 0, args.image_folder + img))
cnt += 1
# shuffle
random.shuffle(img_lst)
#write
for item in img_lst:
fo.writerow(item)
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
        # unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frequency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
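# --- Illustrative sketch, not part of the original script ---
# The stratified branch above uses the deprecated sklearn.cross_validation API.
# A minimal sketch of the same split with the newer sklearn.model_selection
# interface is given below; the helper is never called and its argument names
# are assumptions made for illustration only.
def _stratified_split_sketch(img_lst, percent_val=0.25):
    from sklearn.model_selection import StratifiedShuffleSplit
    img_arr = np.array(img_lst)
    labels = img_arr[:, 1]
    sss = StratifiedShuffleSplit(n_splits=1, test_size=percent_val, random_state=0)
    tr_idx, va_idx = next(sss.split(img_arr, labels))
    return img_arr[tr_idx, :].tolist(), img_arr[va_idx, :].tolist()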
| apache-2.0 |
dmargala/lyabao | bin/analysis_prep.py | 1 | 5168 | #!/usr/bin/env python
import argparse
import numpy as np
import numpy.ma as ma
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import h5py
# def sum_chunk(x, chunk_size, axis=-1):
# shape = x.shape
# if axis < 0:
# axis += x.ndim
# shape = shape[:axis] + (-1, chunk_size) + shape[axis+1:]
# x = x.reshape(shape)
# return x.sum(axis=axis+1)
def combine_pixels(loglam, flux, ivar, num_combine, trim_front=True):
'''
    Combines neighboring pixels along the innermost axis using an ivar-weighted average
'''
shape = flux.shape
num_pixels = flux.shape[-1]
assert len(loglam) == num_pixels
ndim = flux.ndim
new_shape = shape[:ndim-1] + (-1, num_combine)
num_leftover = num_pixels % num_combine
    # slice that also works when num_leftover == 0 (slice(0, -0) would be empty)
    s = slice(num_leftover, None) if trim_front else slice(0, num_pixels - num_leftover)
flux = flux[..., s].reshape(new_shape)
ivar = ivar[..., s].reshape(new_shape)
loglam = loglam[s].reshape(-1, num_combine)
flux, ivar = ma.average(flux, weights=ivar, axis=ndim, returned=True)
loglam = ma.average(loglam, axis=1)
return loglam, flux, ivar
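# Minimal usage sketch for combine_pixels (illustration only; the toy shapes
# and values below are assumptions and this helper is never called).
def _combine_pixels_example():
    loglam = np.log10(np.linspace(3600.0, 3605.0, 6))  # 6 input pixels
    flux = np.ones((2, 6))                              # 2 spectra
    ivar = np.ones((2, 6))
    # combining 3 pixels at a time yields 2 output pixels per spectrum
    new_loglam, new_flux, new_ivar = combine_pixels(loglam, flux, ivar, 3)
    # an ivar-weighted average of equal fluxes is unchanged, and the combined
    # ivar is the sum of the input ivars (here 3.0)
    assert new_flux.shape == (2, 2)
    assert np.allclose(new_flux, 1.0) and np.allclose(new_ivar, 3.0)
    return new_loglam, new_flux, new_ivar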
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
## targets to fit
parser.add_argument('--name', type=str, default=None,
help='file name base')
parser.add_argument('--num-combine', type=int, default=3,
help='number of pixels to combine')
parser.add_argument('--wave-min', type=float, default=3600,
help='minimum observed wavelength')
args = parser.parse_args()
# import data
skim = h5py.File(args.name + '-skim.hdf5', 'r')
skim_norm = skim['norm'][:][:,np.newaxis]
assert not np.any(skim_norm <= 0)
skim_flux = np.ma.MaskedArray(
skim['flux'][:], mask=skim['mask'][:]
)/skim_norm
skim_ivar = np.ma.MaskedArray(
skim['ivar'][:], mask=skim['mask'][:]
)*skim_norm*skim_norm
skim_loglam = skim['loglam'][:]
skim_wave = np.power(10.0, skim_loglam)
good_waves = skim_wave > args.wave_min
print('Combining input pixels...')
loglam, flux, ivar = combine_pixels(
skim_loglam[good_waves],
skim_flux[:, good_waves],
skim_ivar[:, good_waves],
args.num_combine
)
wave = np.power(10.0, loglam)
outfile = h5py.File(args.name + '-cskim.hdf5', 'w')
dataset_kwargs = {
'compression': 'gzip'
}
# save pixel flux, ivar, and mask
outfile.create_dataset('flux', data=flux.data, **dataset_kwargs)
outfile.create_dataset('ivar', data=ivar.data, **dataset_kwargs)
outfile.create_dataset('mask', data=ivar.mask, **dataset_kwargs)
# save uniform wavelength grid
outfile.create_dataset('loglam', data=loglam, **dataset_kwargs)
# save redshifts from input target list
outfile.copy(skim['z'], 'z')
# save additional quantities
outfile.copy(skim['norm'], 'norm')
# save meta data
outfile.copy(skim['meta'], 'meta')
# copy attrs
for attr_key in skim.attrs:
outfile.attrs[attr_key] = skim.attrs[attr_key]
outfile.attrs['coeff0'] = loglam[0]
outfile.attrs['coeff1'] = args.num_combine * skim.attrs['coeff1']
outfile.attrs['max_fid_index'] = len(loglam)
outfile.attrs['wave_min'] = args.wave_min
outfile.close()
# verify combined pixels
print('Computing mean and variance of input pixels...')
skim_flux_mean = np.ma.average(skim_flux, axis=0, weights=skim_ivar)
skim_flux_var = np.ma.average(
(skim_flux - skim_flux_mean)**2, axis=0, weights=skim_ivar)
print('Computing mean and variance of combined pixels...')
flux_mean = np.ma.average(flux, axis=0, weights=ivar)
flux_var = np.ma.average((flux - flux_mean)**2, axis=0, weights=ivar)
savefig_kwargs = {
'dpi': 100,
'bbox_inches': 'tight'
}
print('Making comparison plots...')
plt.figure(figsize=(12, 9))
plt.plot(skim_wave, skim_flux_mean, label='Pipeline pixels')
plt.plot(wave, flux_mean, label='Analysis pixels')
plt.ylim(0.5, 1.5)
plt.ylabel(r'Normalized Flux Mean (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid(True)
plt.savefig(args.name + '-flux-mean.png', **savefig_kwargs)
plt.close()
plt.figure(figsize=(12, 9))
plt.plot(skim_wave, skim_flux_var, label='Pipeline pixels')
plt.plot(wave, flux_var, label='Analysis pixels')
plt.ylim(0, 0.45)
plt.ylabel(r'Normalized Flux Variance (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid(True)
plt.savefig(args.name + '-flux-var.png', **savefig_kwargs)
plt.close()
plt.figure(figsize=(12, 9))
plt.plot(skim_wave, np.sum(skim_ivar, axis=0), label='Pipeline pixels')
plt.plot(wave, np.sum(ivar, axis=0), label='Analysis pixels')
plt.ylabel(r'Inv. Var. Total (arb. units)')
plt.xlabel(r'Observed Wavelength ($\AA$)')
plt.legend()
plt.grid(True)
plt.savefig(args.name + '-ivar-total.png', **savefig_kwargs)
plt.close()
if __name__ == '__main__':
main()
| mit |
Srisai85/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/tools/data.py | 23 | 3369 | """
Compatibility tools for various data structure inputs
"""
from statsmodels.compat.python import range
import numpy as np
import pandas as pd
def _check_period_index(x, freq="M"):
from pandas import PeriodIndex, DatetimeIndex
if not isinstance(x.index, (DatetimeIndex, PeriodIndex)):
raise ValueError("The index must be a DatetimeIndex or PeriodIndex")
from statsmodels.tsa.base.datetools import _infer_freq
inferred_freq = _infer_freq(x.index)
if not inferred_freq.startswith(freq):
raise ValueError("Expected frequency {}. Got {}".format(inferred_freq,
freq))
def is_data_frame(obj):
return isinstance(obj, pd.DataFrame)
def is_design_matrix(obj):
from patsy import DesignMatrix
return isinstance(obj, DesignMatrix)
def _is_structured_ndarray(obj):
return isinstance(obj, np.ndarray) and obj.dtype.names is not None
def interpret_data(data, colnames=None, rownames=None):
"""
Convert passed data structure to form required by estimation classes
Parameters
----------
data : ndarray-like
colnames : sequence or None
May be part of data structure
rownames : sequence or None
Returns
-------
(values, colnames, rownames) : (homogeneous ndarray, list)
"""
if isinstance(data, np.ndarray):
if _is_structured_ndarray(data):
if colnames is None:
colnames = data.dtype.names
values = struct_to_ndarray(data)
else:
values = data
if colnames is None:
colnames = ['Y_%d' % i for i in range(values.shape[1])]
elif is_data_frame(data):
# XXX: hack
data = data.dropna()
values = data.values
colnames = data.columns
rownames = data.index
else: # pragma: no cover
raise Exception('cannot handle other input types at the moment')
if not isinstance(colnames, list):
colnames = list(colnames)
# sanity check
if len(colnames) != values.shape[1]:
raise ValueError('length of colnames does not match number '
'of columns in data')
if rownames is not None and len(rownames) != len(values):
raise ValueError('length of rownames does not match number '
'of rows in data')
return values, colnames, rownames
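# Hedged usage sketch for interpret_data (illustration only; the column and
# index names are made up and this helper is not used elsewhere).
def _interpret_data_example():
    df = pd.DataFrame({'y1': [1.0, 2.0], 'y2': [3.0, 4.0]},
                      index=['obs1', 'obs2'])
    values, colnames, rownames = interpret_data(df)
    # values is a homogeneous ndarray; colnames/rownames come from the frame
    assert values.shape == (2, 2)
    assert colnames == ['y1', 'y2']
    return values, colnames, rownames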
def struct_to_ndarray(arr):
return arr.view((float, len(arr.dtype.names)))
def _is_using_ndarray_type(endog, exog):
return (type(endog) is np.ndarray and
(type(exog) is np.ndarray or exog is None))
def _is_using_ndarray(endog, exog):
return (isinstance(endog, np.ndarray) and
(isinstance(exog, np.ndarray) or exog is None))
def _is_using_pandas(endog, exog):
klasses = (pd.Series, pd.DataFrame, pd.WidePanel)
return (isinstance(endog, klasses) or isinstance(exog, klasses))
def _is_array_like(endog, exog):
try: # do it like this in case of mixed types, ie., ndarray and list
endog = np.asarray(endog)
exog = np.asarray(exog)
return True
except:
return False
def _is_using_patsy(endog, exog):
# we get this when a structured array is passed through a formula
return (is_design_matrix(endog) and
(is_design_matrix(exog) or exog is None))
| bsd-3-clause |
backmari/moose | python/peacock/tests/postprocessor_tab/test_VectorPostprocessorViewer.py | 1 | 4296 | #!/usr/bin/env python
import sys
import os
import unittest
import shutil
import glob
from PyQt5 import QtCore, QtWidgets
from peacock.PostprocessorViewer.VectorPostprocessorViewer import VectorPostprocessorViewer
from peacock.utils import Testing
class TestVectorPostprocessorViewer(Testing.PeacockImageTestCase):
"""
Test class for the ArtistToggleWidget which toggles postprocessor lines.
"""
#: QApplication: The main App for QT, this must be static to work correctly.
qapp = QtWidgets.QApplication(sys.argv)
def setUp(self):
"""
Creates the GUI containing the ArtistGroupWidget and the matplotlib figure axes.
"""
self._filename = "{}_test_*.csv".format(self.__class__.__name__)
self._widget = VectorPostprocessorViewer(timeout=None)
self._widget.initialize([self._filename])
def copyfiles(self):
"""
Copy to temprary location.
"""
for i in [0, 1, 3, 5, 7, 9]:
shutil.copyfile('../input/vpp2_000{}.csv'.format(i), "{}_test_000{}.csv".format(self.__class__.__name__, i))
for data in self._widget._data[0]:
data.load()
def tearDown(self):
"""
Remove temporary.
"""
for filename in glob.glob(self._filename):
if os.path.exists(filename):
os.remove(filename)
def write(self, filename):
"""
Overload the write method.
"""
self._widget.currentWidget().OutputPlugin.write.emit(filename)
def plot(self):
"""
Create plot with all widgets modified.
"""
widget = self._widget.currentWidget()
# Plot some data
toggle = widget.PostprocessorSelectPlugin._groups[0]._toggles['t*x**2']
toggle.CheckBox.setCheckState(QtCore.Qt.Checked)
toggle.PlotAxis.setCurrentIndex(1)
toggle.LineStyle.setCurrentIndex(1)
toggle.LineWidth.setValue(5)
toggle.clicked.emit()
# Add title and legend
ax = widget.AxesSettingsPlugin
ax.Title.setText('Data Plot')
ax.Title.editingFinished.emit()
ax.Legend2.setCheckState(QtCore.Qt.Checked)
ax.Legend2.clicked.emit(True)
ax.Legend2Location.setCurrentIndex(4)
ax.Legend2Location.currentIndexChanged.emit(4)
ax.onAxesModified()
# Set limits and axis titles (y2-only)
ax = widget.AxisTabsPlugin.Y2AxisTab
ax.Label.setText('y2y2y2y2y2y2y2y2y2y2y2y2y2')
ax.Label.editingFinished.emit()
ax.RangeMinimum.setText('1')
ax.RangeMinimum.editingFinished.emit()
def testEmpty(self):
"""
Test that empty plot is working.
"""
self.assertImage('testEmpty.png')
def testWidgets(self):
"""
Test that the widgets contained in PostprocessorPlotWidget are working.
"""
self.copyfiles()
self.plot()
self.assertImage('testWidgets.png')
self.assertFalse(self._widget.cornerWidget().CloseButton.isEnabled())
self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results')
def testCloneClose(self):
"""
Test clone button works.
"""
self.copyfiles()
self._widget.cornerWidget().clone.emit()
self.assertEqual(self._widget.count(), 2)
self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results (2)')
self.assertTrue(self._widget.cornerWidget().CloseButton.isEnabled())
self.assertImage('testEmpty.png')
# Plot something
self.plot()
self.assertImage('testWidgets.png')
# Switch to first tab
self._widget.setCurrentIndex(0)
self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results')
self.assertImage('testEmpty.png')
self.plot()
self.assertImage('testWidgets.png')
# Close the first tab
self._widget.cornerWidget().close.emit()
self.assertEqual(self._widget.count(), 1)
self.assertEqual(self._widget.tabText(self._widget.currentIndex()), 'Results (2)')
self.assertFalse(self._widget.cornerWidget().CloseButton.isEnabled())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 |
qiime2/q2-types | q2_types/sample_data/tests/test_transformer.py | 1 | 3363 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import pandas as pd
import qiime2
from pandas.testing import assert_series_equal
from q2_types.sample_data import AlphaDiversityFormat
from qiime2.plugin.testing import TestPluginBase
class TestTransformers(TestPluginBase):
package = "q2_types.sample_data.tests"
def test_pd_series_to_alpha_diversity_format(self):
transformer = self.get_transformer(pd.Series, AlphaDiversityFormat)
exp_index = pd.Index(['Sample1', 'Sample4'], dtype=object)
exp = pd.Series([0.970950594455, 0.721928094887],
name='shannon', index=exp_index)
obs = transformer(exp)
# Squeeze equals true to return series instead of dataframe
obs = pd.read_csv(str(obs), sep='\t', header=0, index_col=0,
squeeze=True)
assert_series_equal(exp, obs)
def test_alpha_diversity_format_to_pd_series(self):
filename = 'alpha-diversity.tsv'
_, obs = self.transform_format(AlphaDiversityFormat, pd.Series,
filename)
exp_index = pd.Index(['Sample1', 'Sample4'], dtype=object)
exp = pd.Series([0.970950594455, 0.721928094887],
name='shannon', index=exp_index)
assert_series_equal(exp, obs)
def test_alpha_diversity_format_with_metadata_to_pd_series(self):
filename = 'alpha-diversity-with-metadata.tsv'
_, obs = self.transform_format(AlphaDiversityFormat, pd.Series,
filename)
exp_index = pd.Index(['Sample1', 'Sample4'], dtype=object)
exp = pd.Series([0.970950594455, 0.721928094887],
name='shannon', index=exp_index)
assert_series_equal(exp, obs)
def test_alpha_diversity_format_to_pd_series_int_indices(self):
filename = 'alpha-diversity-int-indices.tsv'
_, obs = self.transform_format(AlphaDiversityFormat, pd.Series,
filename)
exp_index = pd.Index(['1', '4'], dtype=object)
exp = pd.Series([0.97, 0.72], name='foo', index=exp_index)
assert_series_equal(exp, obs)
def test_alpha_diversity_format_to_metadata(self):
filename = 'alpha-diversity.tsv'
_, obs = self.transform_format(AlphaDiversityFormat, qiime2.Metadata,
filename)
exp_index = pd.Index(['Sample1', 'Sample4'], name='Sample ID',
dtype=object)
exp_df = pd.DataFrame([[0.9709505944546688], [0.7219280948873623]],
columns=['shannon'], index=exp_index)
exp_md = qiime2.Metadata(exp_df)
self.assertEqual(obs, exp_md)
def test_non_alpha_diversity(self):
filename = 'also-not-alpha-diversity.tsv'
with self.assertRaisesRegex(ValueError, 'Non-numeric values '):
self.transform_format(AlphaDiversityFormat, pd.Series, filename)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
mblondel/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
qifeigit/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
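# Minimal sketch of how benchmark() is meant to be used (illustration only;
# the toy data below is an assumption and this helper is not part of the run).
def _benchmark_example():
    rng = np.random.RandomState(0)
    toy = rng.randn(200, 20)
    results = benchmark(PCA(n_components=5), toy)
    # results holds the fit time in seconds and the mean reconstruction error
    return results['time'], results['error']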
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
ycool/apollo | modules/tools/prediction/data_pipelines/junctionMLP_train.py | 3 | 4312 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
tensorflow 1.11
"""
import os
import h5py
import logging
import argparse
import numpy as np
import tensorflow as tf
import proto.fnn_model_pb2
from proto.fnn_model_pb2 import FnnModel, Layer
from sklearn.model_selection import train_test_split
dim_input = 7 + 72
dim_output = 12
def load_data(filename):
"""
Load the data from h5 file to the format of numpy
"""
if not (os.path.exists(filename)):
logging.error("file: {}, does not exist".format(filename))
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
logging.error("file: {} is not an hdf5 file".format(filename))
os._exit(1)
samples = dict()
h5_file = h5py.File(filename, 'r')
for key in h5_file.keys():
samples[key] = h5_file[key][:]
print("load file success")
return samples['data']
def data_preprocessing(data):
X = data[:, :dim_input]
Y = data[:, -dim_output:]
return X, Y
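# Small sketch of the expected sample layout (illustration only; the synthetic
# array below is an assumption and this helper is never called during training).
def _data_preprocessing_example():
    # each row holds dim_input features followed by dim_output label entries
    fake = np.zeros((4, dim_input + dim_output))
    fake[:, -dim_output:] = 1.0
    X, Y = data_preprocessing(fake)
    assert X.shape == (4, dim_input) and Y.shape == (4, dim_output)
    return X, Y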
def save_model(model, filename):
"""
Save the trained model parameters into protobuf binary format file
"""
net_params = FnnModel()
net_params.num_layer = 0
for layer in model.layers:
net_params.num_layer += 1
net_layer = net_params.layer.add()
config = layer.get_config()
net_layer.layer_input_dim = dim_input
net_layer.layer_output_dim = dim_output
if config['activation'] == 'relu':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.RELU
elif config['activation'] == 'tanh':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.TANH
elif config['activation'] == 'sigmoid':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.SIGMOID
elif config['activation'] == 'softmax':
net_layer.layer_activation_func = proto.fnn_model_pb2.Layer.SOFTMAX
weights, bias = layer.get_weights()
net_layer.layer_bias.columns.extend(bias.reshape(-1).tolist())
for col in weights.tolist():
row = net_layer.layer_input_weight.rows.add()
row.columns.extend(col)
net_params.dim_input = dim_input
net_params.dim_output = dim_output
with open(filename, 'wb') as params_file:
params_file.write(net_params.SerializeToString())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train neural network based on feature files and save parameters')
parser.add_argument('filename', type=str, help='h5 file of data.')
args = parser.parse_args()
file = args.filename
# load_data
data = load_data(file)
print("Data load success, with data shape: " + str(data.shape))
train_data, test_data = train_test_split(data, test_size=0.2)
X_train, Y_train = data_preprocessing(train_data)
X_test, Y_test = data_preprocessing(test_data)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(30, activation=tf.nn.relu),
tf.keras.layers.Dense(20, activation=tf.nn.relu),
tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
# loss='MSE',
metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=5)
model_path = os.path.join(os.getcwd(), "junction_mlp_vehicle_model.bin")
save_model(model, model_path)
print("Model saved to: " + model_path)
score = model.evaluate(X_test, Y_test)
print("Testing accuracy is: " + str(score))
| apache-2.0 |
michaelpacer/scikit-image | doc/examples/plot_local_otsu.py | 14 | 1575 | """
====================
Local Otsu Threshold
====================
This example shows how Otsu's threshold [1]_ method can be applied locally. For
each pixel, an "optimal" threshold is determined by maximizing the variance
between two classes of pixels of the local neighborhood defined by a structuring
element.
The example compares the local threshold with the global threshold.
.. note:: Local thresholding is much slower than global thresholding.
.. [1] http://en.wikipedia.org/wiki/Otsu's_method
"""
import matplotlib
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import threshold_otsu, rank
from skimage.util import img_as_ubyte
matplotlib.rcParams['font.size'] = 9
img = img_as_ubyte(data.page())
radius = 15
selem = disk(radius)
local_otsu = rank.otsu(img, selem)
threshold_global_otsu = threshold_otsu(img)
global_otsu = img >= threshold_global_otsu
fig, ax = plt.subplots(2, 2, figsize=(8, 5))
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(img, cmap=plt.cm.gray),
ax=ax1, orientation='horizontal')
ax1.set_title('Original')
ax1.axis('off')
fig.colorbar(ax2.imshow(local_otsu, cmap=plt.cm.gray),
ax=ax2, orientation='horizontal')
ax2.set_title('Local Otsu (radius=%d)' % radius)
ax2.axis('off')
ax3.imshow(img >= local_otsu, cmap=plt.cm.gray)
ax3.set_title('Original >= Local Otsu')
ax3.axis('off')
ax4.imshow(global_otsu, cmap=plt.cm.gray)
ax4.set_title('Global Otsu (threshold = %d)' % threshold_global_otsu)
ax4.axis('off')
plt.show()
| bsd-3-clause |
binghongcha08/pyQMD | GWP/2D/1.0.4/resample/c.py | 28 | 1767 | ##!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context('poster')
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
f, (ax1, ax2) = plt.subplots(2, sharex=True)
#f.subplots_adjust(hspace=0.1)
#plt.subplot(211)
ax1.set_ylim(0,4)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
ax1.plot(data[:,0],data[:,x], linewidth=1)
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#plt.xlabel('time')
ax1.set_ylabel('position [bohr]')
#plt.title('traj')
#plt.subplot(212)
data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,16):
ax2.plot(data[:,0],data[:,x], linewidth=1)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('$|c_i|$')
#plt.ylim(-0.2,5)
#plt.subplot(2,2,3)
#data = np.genfromtxt(fname='norm')
#plt.plot(data[:,0],data[:,1],'r-',linewidth=2)
#plt.ylim(0,2)
#plt.subplot(2,2,4)
#data = np.genfromtxt(fname='wf.dat')
#data1 = np.genfromtxt(fname='wf0.dat')
#data0 = np.genfromtxt('../spo_1d/t500')
#plt.plot(data[:,0],data[:,1],'r--',linewidth=2)
#plt.plot(data0[:,0],data0[:,1],'k-',linewidth=2)
#plt.plot(data1[:,0],data1[:,1],'k-.',linewidth=2)
#plt.title('t=100')
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
#plt.xlim(0.8,2.1)
#plt.xlabel('x')
#plt.ylabel('$\psi^*\psi$')
plt.savefig('traj.pdf')
plt.show()
| gpl-3.0 |
RabadanLab/Pandora | helpers/makeHTML.py | 1 | 26512 | import pandas as pd;
def generateHTML(reportLoc, namesLoc, outputLoc):
"""
generateHTML: generate HTML report from report.taxon:
reportLoc - location of report.taxon
namesLoc - location of names.dmp file containing taxid to name
outputLoc - location of file output folder
"""
#reportLoc = "report.taxon.txt"
#namesLoc = "/names.dmp"
#outputLoc = "."
#reportLoc = "~/report.taxon.txt"
#namesLoc = "~/Downloads/taxdump/names.dmp"
#load taxon report
df = pd.read_table(reportLoc)
df.applymap(str)
df.columns = ['name','taxID','num_of_read','num_of_contigs','longest_contig','len_longest_contig','RPMH']
sampleID = str(df['name'][0])
####
#load canvasJS
# with open(canvasLoc,'r') as canvasjs:
# canvasString = canvasjs.read()
#
# #load jquery
# with open(jqueryLoc,'r') as jqueryjs:
# jqueryString = jqueryjs.read()
#
# #load DataTables
# with open(datatablesLoc,'r') as datatables:
# datatablesString = datatables.read()
#load taxid to name
nameDump = pd.read_table(namesLoc,sep="|",header=None)
nameDump[1] = nameDump[1].str.strip()
nameDump[2] = nameDump[2].str.strip()
nameDump[3] = nameDump[3].str.strip()
nameDump = nameDump.applymap(str)
#resolve scientific names for each taxID (the canvasJS donut string build below is left commented out)
pd.options.mode.chained_assignment = None
#donutString = ""
df = df.applymap(str)
for i in range(0,len(df.index)):
df['taxID'][i] = df['taxID'][i].split(";")[0]
tempname = nameDump[(nameDump[0] == df['taxID'][i]) & (nameDump[3] == 'scientific name')][[1]]
if(not tempname.empty):
df['name'][i] = tempname.to_string(header=False,index=False)
#temp = "{y:"+str(df['RPMH'][i])+",label:'"+df['name'][i].replace("'","")+" -'},"
#df['taxID'][i] = '<a href="https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id='+ str(df['taxID'][i]) +'">'+str(df['taxID'][i])+'</a>'
#donutString=donutString+temp
#donutString=donutString.rstrip(',')
#workaround of column width to properly display taxid lnk
#old_width = pd.get_option('display.max_colwidth')
#pd.set_option('display.max_colwidth', -1)
#tableString = df.to_html(index=False).replace("<","<").replace(">",">").replace('<table border="1" class="dataframe">','<table id="table1" border="1" class="dataframe">')
#pd.set_option('display.max_colwidth', old_width)
#generate output including JS packages as string in header
htmloutput = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script src="https://d3js.org/d3.v4.min.js"></script>
</head>
<body>
<div id="wrapper">
<h1 style="margin-left:10%; font-size: 40px">Pandora Report</h1>
<div id="chart"></div>
<div id="table"></div>
</div>
<script>
var chartDiv = document.getElementById("chart");
var width = chartDiv.clientWidth -20;
var height = chartDiv.clientHeight -20;
var x = d3.scaleLinear().range([0,width]).domain([0,1000])
var y = d3.scaleLinear().range([0,height]).domain([0,1000])
var scale = d3.scaleLinear().range([0,width]).domain([0,1000])
function donutChart() {
var width,
height,
margin = {top: 10, right: 10, bottom: 10, left: 10},
colour = d3.scaleOrdinal(d3.schemeCategory20c), // colour scheme
variable, // value in data that will dictate proportions on chart
category, // compare data by
padAngle, // effectively dictates the gap between slices
floatFormat = d3.format('.4r'),
cornerRadius, // sets how rounded the corners are on each slice
percentFormat = d3.format(',.2%');
function chart(selection){
selection.each(function(data) {
// generate chart
// ===========================================================================================
// Set up constructors for making donut. See https://github.com/d3/d3-shape/blob/master/README.md
var radius = Math.min(width, height) / 2;
// creates a new pie generator
var pie = d3.pie()
.value(function(d) { return floatFormat(d[variable]); })
.sort(null)
// contructs and arc generator. This will be used for the donut. The difference between outer and inner
// radius will dictate the thickness of the donut
var arc = d3.arc()
.outerRadius(radius * 0.8)
.innerRadius(radius * 0.5)
.cornerRadius(cornerRadius)
.padAngle(padAngle);
var bigArc = d3.arc()
.outerRadius(radius * 0.85)
.innerRadius(radius * 0.5)
.cornerRadius(cornerRadius)
.padAngle(padAngle);
// this arc is used for aligning the text labels
var outerArc = d3.arc()
.outerRadius(radius * 0.9)
.innerRadius(radius * 0.9);
// ===========================================================================================
// ===========================================================================================
// append the svg object to the selection
var svg = selection.append('svg')
.attr('width', width + margin.left + margin.right)
.attr('height', height + margin.top + margin.bottom)
.append('g')
.attr('transform', 'translate(' + width / 2 + ',' + height / 2 + ')');
// ===========================================================================================
// ===========================================================================================
// g elements to keep elements within svg modular
svg.append('g').attr('class', 'slices');
svg.append('g').attr('class', 'labelName');
svg.append('g').attr('class', 'lines');
// ===========================================================================================
// ===========================================================================================
// add and colour the donut slices
var path = svg.select('.slices')
.datum(data).selectAll('path')
.data(pie)
.enter().append('path')
.attr('fill', function(d) { return colour(d.data[category]); })
.attr('d', arc)
// ===========================================================================================
// ===========================================================================================
// add text labels
var label = svg.select('.labelName').selectAll('text')
.data(pie)
.enter().append('text')
.attr('dy', '.35em')
.html(function(d) {
// add "key: value" for given category. Number inside tspan is bolded in stylesheet.
return d.data[category] + ': <tspan>' + percentFormat(d.data[variable]) + '</tspan>';
})
.attr('transform', function(d) {
// effectively computes the centre of the slice.
// see https://github.com/d3/d3-shape/blob/master/README.md#arc_centroid
var pos = outerArc.centroid(d);
// changes the point to be on left or right depending on where label is.
pos[0] = radius * 0.95 * (midAngle(d) < Math.PI ? 1 : -1);
return 'translate(' + pos + ')';
})
.style('text-anchor', function(d) {
// if slice centre is on the left, anchor text to start, otherwise anchor to end
return (midAngle(d)) < Math.PI ? 'start' : 'end';
});
// ===========================================================================================
// ===========================================================================================
// add lines connecting labels to slice. A polyline creates straight lines connecting several points
var polyline = svg.select('.lines')
.selectAll('polyline')
.data(pie)
.enter().append('polyline')
.attr('points', function(d) {
// see label transform function for explanations of these three lines.
var pos = outerArc.centroid(d);
pos[0] = radius * 0.95 * (midAngle(d) < Math.PI ? 1 : -1);
return [arc.centroid(d), outerArc.centroid(d), pos]
});
// ===========================================================================================
// ===========================================================================================
// add tooltip to mouse events on slices and labels
d3.selectAll('.labelName text, .slices path').call(toolTip);
// ===========================================================================================
// ===========================================================================================
// Functions
// calculates the angle for the middle of a slice
function midAngle(d) { return d.startAngle + (d.endAngle - d.startAngle) / 2; }
// function that creates and adds the tool tip to a selected element
function toolTip(selection) {
// add tooltip (svg circle element) when mouse enters label or slice
selection.on('mouseenter', function (data) {
d3.select(this)
.transition()
.duration(200)
.attr('d', bigArc);
svg.append('text')
.attr('class', 'toolCircle')
.attr('dy', -15) // hard-coded. can adjust this to adjust text vertical alignment in tooltip
.html(toolTipHTML(data)) // add text to the circle.
.style('font-size', '.9em')
.style('text-anchor', 'middle'); // centres text in tooltip
svg.append('circle')
.attr('class', 'toolCircle')
.attr('r', radius * 0.45) // radius of tooltip circle
.style('fill', colour(data.data[category])) // colour based on category mouse is over
.style('fill-opacity', 0.35);
});
// remove the tooltip when mouse leaves the slice/label
selection.on('mouseout', function () {
d3.selectAll('.toolCircle').remove();
d3.select(this)
.transition()
.duration(100)
.attr('d', arc);
});
}
// function that builds the HTML string for the tooltip: the name, percentage,
// number of reads and number of contigs of the hovered slice
function toolTipHTML(data) {
/*
var tip = '',
i = 0;
for (var key in data.data) {
// if value is a number, format it as a percentage
var value = data.data[key];
// leave off 'dy' attr for first tspan so the 'dy' attr on text element works. The 'dy' attr on
// tspan effectively imitates a line break.
if (i === 0) tip += '<tspan x="0">' + key + ': ' + value + '</tspan>';
else tip += '<tspan x="0" dy="1.2em">' + key + ': ' + value + '</tspan>';
i++;
}
*/
var tip = '<tspan x="0" dy="-0.5em" font-weight="bold">' + data.data.name + '</tspan>'
+ '<tspan x="0" dy="1.2em">' + 'Percentage : ' + percentFormat(data.data[variable]) + '</tspan>'
+ '<tspan x="0" dy="1.2em">' + 'Reads : ' + data.data.num_of_read + '</tspan>'
+ '<tspan x="0" dy="1.2em">' + 'Contigs : ' + data.data.num_of_contigs + '</tspan>'
return tip;
}
// ===========================================================================================
});
}
// getter and setter functions. See Mike Bostocks post "Towards Reusable Charts" for a tutorial on how this works.
chart.width = function(value) {
if (!arguments.length) return width;
width = value;
return chart;
};
chart.height = function(value) {
if (!arguments.length) return height;
height = value;
return chart;
};
chart.margin = function(value) {
if (!arguments.length) return margin;
margin = value;
return chart;
};
chart.radius = function(value) {
if (!arguments.length) return radius;
radius = value;
return chart;
};
chart.padAngle = function(value) {
if (!arguments.length) return padAngle;
padAngle = value;
return chart;
};
chart.cornerRadius = function(value) {
if (!arguments.length) return cornerRadius;
cornerRadius = value;
return chart;
};
chart.colour = function(value) {
if (!arguments.length) return colour;
colour = value;
return chart;
};
chart.variable = function(value) {
if (!arguments.length) return variable;
variable = value;
return chart;
};
chart.category = function(value) {
if (!arguments.length) return category;
category = value;
return chart;
};
return chart;
}
function barTable() {
var width,
cellheight,
margin = {top: 10, right: 10, bottom: 10, left: 10},
columns;
function chart(selection) {
selection.each(function(data) {
console.log(data)
var svg = selection.append('svg')
.attr('width', '80%')
.attr('height', (data.length * cellheight) + margin.top + margin.bottom)
.attr('overflow','scroll')
.attr('transform', function(d,i){
return 'translate(' + scale(100) + ', 0)';
})
var header = svg.append('g')
.attr('class','header')
//loop through object to generate columns
for (var i in columns){
var headerCol = header.append('g')
.attr('class','headerCol')
.attr('transform', 'translate(' + columns[i].x + ', 0)')
headerCol.append('rect')
.attr('width',columns[i].width)
.attr('height',40)
.attr('rx',3)
.attr('ry',3)
.style('fill', 'rgb(253, 174, 107)')
.style('opacity',1)
headerCol.append('text')
.attr('transform', function(d,i){
return 'translate(10, 30)';
})
.style('font-weight','bold')
.text(columns[i].name);
}
//draw table body
var tablebody = svg.append('g')
.attr('class','tablebody')
var row = tablebody.selectAll('g')
.data(data)
.enter()
.append('g')
.attr('class','row')
.attr('transform', function(d,i){
return 'translate(0, ' + ((i * cellheight) + 60)+ ')';
})
var row_background = row.append('rect')
.attr('transform', function(d,i){
return 'translate(' + scale(10) + ', -15)';
})
.attr('width','97%')
.attr('height',20)
.style('fill', 'rgb(253, 208, 162)')
.style('opacity',0)
.on('mouseover',function(d){
d3.select(this)
.style('opacity',0.9)
})
.on('mouseout',function(d){
d3.select(this)
.transition()
.duration(100)
.style('opacity',0)
})
var name_col = row.append('text')
.attr('transform', function(d,i){
return 'translate(' + (10 + columns[0].x) + ', 0)';
})
.text(function(d,i){
return d.name
});
var taxid_col = row.append('a')
.attr('transform', function(d,i){
return 'translate(' + (10 + columns[1].x) + ', 0)';
})
.attr('href', function(d){
return 'https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=' + d.taxID
})
.attr('target','_blank')
.append('text')
.text(function(d){
return d.taxID
});
var longestcontigbox_col = row.append('rect')
.attr('transform', function(d,i){
return 'translate(' + (5 + columns[2].x) + ', -15)';
})
.attr('height',20)
.attr('width', function(d){
return d.contig_percentage * scale(90)
})
.attr('fill','rgb(198, 219, 239)')
.attr('stroke','none')
var longestcontig_col = row.append('text')
.attr('transform', function(d,i){
return 'translate(' + (10 + columns[2].x) + ', 0)';
})
.text(function(d){
return d.len_longest_contig + ' bp'
});
var percentbox_col = row.append('rect')
.attr('transform', function(d,i){
return 'translate(' + (5 + columns[3].x) + ', -15)';
})
.attr('height',20)
.attr('width', function(d){
return d.percentage * scale(200)
})
.attr('fill','rgb(198, 219, 239)')
.attr('stroke','none')
var readnum_col = row.append('text')
.attr('transform', function(d,i){
return 'translate(' + (10 + columns[3].x) + ', 0)';
})
.text(function(d){
return d.num_of_read
});
var rpmh_col = row.append('rect')
.attr('transform', function(d,i){
return 'translate(' + (5 + columns[4].x) + ', -15)';
})
.attr('height',20)
.attr('width', function(d){
return d.rpmh_percentage * scale(90)
})
.attr('fill','rgb(198, 219, 239)')
.attr('stroke','none')
var rpmh_col = row.append('text')
.attr('transform', function(d,i){
return 'translate(' + (10 + columns[4].x) + ', 0)';
})
.text(function(d){
return d.RPMH
});
});
}
chart.width = function(value) {
if (!arguments.length) return width;
width = value;
return chart;
};
chart.cellheight = function(value) {
if (!arguments.length) return cellheight;
cellheight = value;
return chart;
};
chart.margin = function(value) {
if (!arguments.length) return margin;
margin = value;
return chart;
};
chart.columns = function(value) {
if (!arguments.length) return columns;
columns = value;
return chart;
};
return chart;
}
var donut = donutChart()
.width(width)
.height(scale(250))
.cornerRadius(2) // sets how rounded the corners are on each slice
.padAngle(0.015) // effectively dictates the gap between slices
.variable('percentage')
.category('name');
var bartable = barTable()
.width(width)
.cellheight(22)
.columns([
{name: 'Name', width: scale(285), x:scale(10)},
{name: 'TaxID', width: scale(95), x:scale(300)},
{name: 'Longest contig', width: scale(95), x:scale(400)},
{name: '# of reads', width: scale(195), x:scale(500)},
{name: 'RPMH', width: scale(95), x:scale(700)},
])
var data = d3.tsvParse(`''' + df.to_csv(sep="\t",index=False,doublequote=False) + '''`)
// get the total number of reads, the longest contig length and the maximum RPMH
var total_num_reads = 0
var longest_contig_length = 0
var max_rpmh = 0
for(var organism in data) {
if(data[organism].num_of_read){
total_num_reads += parseInt(data[organism].num_of_read)
}
if(parseInt(data[organism].len_longest_contig) > longest_contig_length){
longest_contig_length = data[organism].len_longest_contig
}
if(parseInt(data[organism].RPMH) > max_rpmh){
max_rpmh = data[organism].RPMH
}
}
// compute relative values (reads, longest contig, RPMH) for each organism
for(var i in data){
data[i].percentage = data[i].num_of_read / total_num_reads
data[i].contig_percentage = data[i].len_longest_contig / longest_contig_length
data[i].rpmh_percentage = data[i].RPMH / max_rpmh
}
data.sort(function(a, b) {
return b.RPMH - a.RPMH ;
});
var filteredData = data.filter(function(d) {
return d.percentage > 0.015
})
var underThresh = data.filter(function(d) {
return d.percentage <= 0.015
})
// sum the reads of organisms under the threshold
var readUnderThresh = 0
for(var organism in underThresh) {
if(underThresh[organism].num_of_read){
readUnderThresh += parseInt(underThresh[organism].num_of_read)
}
}
//add combined others to filteredData
filteredData.push({
name: "Others",
num_of_read: readUnderThresh
})
for(var i in filteredData){
filteredData[i].percentage = filteredData[i].num_of_read / total_num_reads
}
d3.select('#chart')
.datum(filteredData) // bind data to the div
.call(donut); // draw chart in div
d3.select('#table')
.datum(data)
.call(bartable);
</script>
</body>
<style>
body {
font-family: 'Roboto', sans-serif;
color: #333333;
}
/*Styling for the lines connecting the labels to the slices*/
polyline{
opacity: .3;
stroke: black;
stroke-width: 1.5px;
fill: none;
}
/* Make the percentage on the text labels bold*/
.labelName tspan {
font-style: normal;
font-weight: 700;
}
/* In biology we generally italicise species names. */
.labelName {
font-size: 0.9em;
font-style: italic;
}
#wrapper {
display: table;
height: 100vh;
}
#chart {
display: table-row;
left: 0px;
right: 0px;
top: 0px;
bottom: 0px;
z-index: 1;
}
#table {
display:table-row;
height:100%;
overflow-y: scroll;
font-size: 15px;
}
</style>
</html>
'''
#write to html file
with open(outputLoc+'/report.taxon.html', 'w') as f:
f.write(htmloutput)
| mit |
ibis-project/ibis | ibis/backends/base/file/__init__.py | 1 | 3834 | from pathlib import Path
import ibis.expr.types as ir
from ibis.backends.base import Client, Database
from ibis.backends.pandas.core import execute_and_reset
class FileClient(Client):
def __init__(self, backend, root):
self.extension = backend.extension
self.table_class = backend.table_class
self.root = Path(str(root))
self.dictionary = {}
def insert(self, path, expr, **kwargs):
raise NotImplementedError
def table(self, name, path):
raise NotImplementedError
def database(self, name=None, path=None):
if name is None:
return FileDatabase('root', self, path=path)
if name not in self.list_databases(path):
raise AttributeError(name)
if path is None:
path = self.root
new_name = "{}.{}".format(name, self.extension)
if (self.root / name).is_dir():
path /= name
elif not str(path).endswith(new_name):
path /= new_name
return FileDatabase(name, self, path=path)
def execute(self, expr, params=None, **kwargs): # noqa
assert isinstance(expr, ir.Expr)
return execute_and_reset(expr, params=params, **kwargs)
def list_tables(self, path=None):
raise NotImplementedError
def _list_tables_files(self, path=None):
# tables are files in a dir
if path is None:
path = self.root
tables = []
if path.is_dir():
for d in path.iterdir():
if d.is_file():
if str(d).endswith(self.extension):
tables.append(d.stem)
elif path.is_file():
if str(path).endswith(self.extension):
tables.append(path.stem)
return tables
def list_databases(self, path=None):
raise NotImplementedError
def _list_databases_dirs(self, path=None):
# databases are dir
if path is None:
path = self.root
tables = []
if path.is_dir():
for d in path.iterdir():
if d.is_dir():
tables.append(d.name)
return tables
def _list_databases_dirs_or_files(self, path=None):
# databases are dir & file
if path is None:
path = self.root
tables = []
if path.is_dir():
for d in path.iterdir():
if d.is_dir():
tables.append(d.name)
elif d.is_file():
if str(d).endswith(self.extension):
tables.append(d.stem)
elif path.is_file():
# by definition we are at the db level at this point
pass
return tables
class FileDatabase(Database):
def __init__(self, name, client, path=None):
super().__init__(name, client)
self.path = path
def __str__(self):
return '{0.__class__.__name__}({0.name})'.format(self)
def __dir__(self):
dbs = self.list_databases(path=self.path)
tables = self.list_tables(path=self.path)
return sorted(set(dbs).union(set(tables)))
def __getattr__(self, name):
try:
return self.table(name, path=self.path)
except AttributeError:
return self.database(name, path=self.path)
def table(self, name, path):
return self.client.table(name, path=path)
def database(self, name=None, path=None):
return self.client.database(name=name, path=path)
def list_databases(self, path=None):
if path is None:
path = self.path
return sorted(self.client.list_databases(path=path))
def list_tables(self, path=None):
if path is None:
path = self.path
return sorted(self.client.list_tables(path=path))
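# Illustrative sketch (hypothetical helper, not part of the API above): with a
# stand-in backend whose files use the 'csv' extension, directories under the
# root are listed as databases and '<name>.csv' files as tables.
def _demo_file_layout(tmp_root):
    class _FakeBackend:
        # only the two attributes read by FileClient.__init__ are provided
        extension = 'csv'
        table_class = None
    client = FileClient(_FakeBackend(), tmp_root)
    return client._list_databases_dirs(), client._list_tables_files()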
| apache-2.0 |
rajat1994/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
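# Illustrative sketch (hypothetical helper, not part of the original example):
# the number of candidate settings explored per CV fold is the product of the
# number of values per parameter -- 3 * 2 * 2 * 2 = 24 for the grid above.
def _n_candidates(param_grid):
    n = 1
    for values in param_grid.values():
        n *= len(values)
    return n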
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Peru, Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Peru, Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
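# Illustrative sketch (hypothetical helper; assumes the dataset can be
# downloaded or is already cached): the grids built by construct_grids index
# the last two axes of batch.coverages, which has shape (14, Ny, Nx).
def _demo_grid_shapes():
    batch = fetch_species_distributions()
    xgrid, ygrid = construct_grids(batch)
    return xgrid.shape, ygrid.shape, batch.coverages.shape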
| bsd-3-clause |
VandroiyLabs/FaroresWind | faroreswind/collector/Recording.py | 1 | 9078 | # standard libraries
import sys, os, time, io
import datetime
# enose library
import ElectronicNose as EN
# signal processing and math
import signal
import numpy as np
# to create threads
import multiprocessing as mp
# For plot
import matplotlib as mpl
mpl.use('Agg')
import pylab as pl
import matplotlib.gridspec as gridspec
# For the web service
import tornado.ioloop
import tornado.web
import logging
from websocket import create_connection
## Multiprocessing shared variables
# Stop switch
stopSwitch = mp.Value('i')
stopSwitch.value = 1
# Exporting flag
doExport = mp.Value('i')
doExport.value = 0
# Variable for the name (ID) of the e-nose
sensorname = mp.Value('i')
def exporter(host):
## Counts how many times the server was busy,
## then quits...
busyCount = 0
maxBusyCount = 3
## Automatic updating system
lastUpdateTimeStamp = datetime.datetime.now() # first sets as starting point
updateInterval = datetime.timedelta(minutes = 30)
while True:
currTime = datetime.datetime.now()
time4Update = currTime - lastUpdateTimeStamp > updateInterval
if doExport.value == 1 or time4Update:
## In case only time4Update was true
doExport.value = 1
print( "\n\nConnecting to server..." )
try:
## Connecting to the socket
ws = create_connection("ws://" + host + ":8799/DataIntegration")
response = ws.recv()
## Checking server status
if response == "Busy":
print( "Server is busy." )
busyCount += 1
# max trials reached, stop trying to contact the server...
if busyCount == maxBusyCount:
print( "Server not responsive!! (maxBusyCount achieved)" )
doExport.value = 0
busyCount = 0
## Server is ready to receive the data
elif response == "Free":
print( "Server ready to process data." )
## Resetting the counter for busy responses
busyCount = 0
## Identifying itself for the server, and sending a warning
ws.send( "My name: " + str(sensorname.value) )
ws.send( "sending" )
print( ws.recv() )
## Exporting the Data
# Sends a message to Enose to export the data
stopSwitch.value = 3
# Waiting until ENOSE exported the data
while doExport.value == 1:
time.sleep(2.)
## Receiving confirmation that the server received the last message
ws.send("sent")
print( ws.recv() )
## Closing the connection
ws.close()
## Updating the record for the last updating time stamp
lastUpdateTimeStamp = datetime.datetime.now() # first sets as starting point
doExport.value = 0
else:
print( "Server is crazy." )
except:
print( "Server not responsive!!" )
doExport.value = 0
time.sleep(5.)
return
def collector(enose, user, host, folder):
## Interval between measurements
intrval = 0.050
## Counting for automatic plotting
count = 0
while stopSwitch.value != 0:
## Getting new sample
pre = time.time()
enose.sniff(nsamples=3)
e = time.time() - pre
if e < 0.004: time.sleep(0.004 - e)
## Updating the local visualization tool
count +=1
if count == 20:
# updating shared array
np.save( 'recent.npy', enose.memory[-5000:] )
count = 0
## Checking if data should be exported
if stopSwitch.value == 3:
file_name = 'NewData_' + str(sensorname.value) + '_' \
+ time.strftime("%Y-%m_%d_%H-%M-%S")
## TODO: check whether the file was saved and exported
np.save( file_name+'.npy', enose.memory )
outscp = os.system("scp " + file_name + ".npy "
+ user + "@" + host + ":" + folder)
if outscp == 0:
os.system("rm -f "+file_name+".npy")
enose.forget()
stopSwitch.value = 1
doExport.value = -1
## Clock
key = True
while key:
dif = intrval - ( time.time() % 1 ) % intrval
key = not ( dif < 0.005 )
time.sleep( dif*0.6 )
np.save('Data.npy', enose.memory)
return
def genImage():
# Collecting latest data
recent = np.load('recent.npy')
# Converting time from seconds to hours
time = recent[:,0] / 3600.
pl.figure( figsize=(8,5) )
gs = gridspec.GridSpec(2, 2, height_ratios=[1.5,1], width_ratios = [1,1] )
## Starting with the sensors
sensorPanel = pl.subplot(gs[0,:])
for j in range(1,9):
sensorPanel.plot(time, recent[:,j])
sensorPanel.set_ylabel("Sensor resistance")
sensorPanel.set_xlabel('Time (h)')
sensorPanel.set_xlim( time.min() - 0.01, time.max() + 0.01)
sensorPanel.grid(True)
## Temperature and humidity
tempPanel = pl.subplot(gs[1,0])
tempPanel.plot(time, recent[:,9])
tempPanel.set_ylabel("Temperature")
tempPanel.set_xlabel('Time (h)')
tempPanel.set_xlim( time.min() - 0.01, time.max() + 0.01)
humdPanel = pl.subplot(gs[1,1])
humdPanel.plot(time, recent[:,10])
humdPanel.set_ylabel("Humidity")
humdPanel.set_xlabel('Time (h)')
humdPanel.set_xlim( time.min() - 0.01, time.max() + 0.01)
memdata = io.BytesIO()
pl.tight_layout()
pl.savefig(memdata, format='png', dpi=150)
image = memdata.getvalue()
pl.close()
return image
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("<html><head>")
self.write('<meta http-equiv="refresh" content="5">')
self.write("<title>Sensor</title></head>")
self.write("<body>")
self.write("<h1>Sensor hal"+str(sensorname.value)+"k</h1>")
self.write('<img src="recent.png" style="width: 900px;" />')
self.write("</body></html>")
class ImageHandler(tornado.web.RequestHandler):
def get(self):
image = genImage()
self.set_header('Content-type', 'image/png')
self.set_header('Content-length', len(image))
self.write(image)
def webservice(port):
application = tornado.web.Application([
(r"/", MainHandler),
(r"/recent.png", ImageHandler),
])
application.listen(port)
tornado.ioloop.PeriodicCallback(try_exit, 100).start()
tornado.ioloop.IOLoop.instance().start()
return
def try_exit():
if stopSwitch.value == 0:
tornado.ioloop.IOLoop.instance().stop()
return
def signal_handler(signal, frame):
print( "\nStopping..." )
sys.exit(0)
def daemon( enoseID ):
hn = logging.NullHandler()
hn.setLevel(logging.DEBUG)
logging.getLogger("tornado.access").addHandler(hn)
logging.getLogger("tornado.access").propagate = False
signal.signal(signal.SIGINT, signal_handler)
# Defining the name
sensorname.value = int(enoseID)
print( "Creating the ENose object..." )
enose = EN.ElectronicNose()
print( "Preparing environment..." )
## Reading configuration file
configfile = file('Cconfig','r')
user = configfile.readline().split('\n')[0]
port = int(configfile.readline().split('\n')[0])
host = configfile.readline().split('\n')[0]
folder = configfile.readline().split('\n')[0]
configfile.close()
## Parallel processes
sniffer = mp.Process(target=collector, args=(enose, user, host, folder))
sniffer.start()
exporter_th = mp.Process(target=exporter, args=(host,))
exporter_th.start()
print( "Starting web service (use port " + str(port) + ")" )
is_closing = False
webserv = mp.Process(target=webservice, args=(port,))
webserv.start()
print( "Starting data collection (CTRL+C to stop)" )
print( "\n" )
while True:
command = raw_input("\nCommand: [")
if command == "":
ctime = datetime.datetime.now()
print( "Current time stamp: " + str(ctime) )
elif command == "plot" or command == "p":
stopSwitch.value = 2
pl.close()
elif command == "export" or command == "e":
doExport.value = 1
while doExport.value != 0:
time.sleep(0.1)
elif command == "stop" or command == "s":
stopSwitch.value = 0
sniffer.join()
os.system(" rm -f recent.npy plot_recent.png ")
break
print( "\nThe end, my friend." )
return
| gpl-3.0 |
aiguofer/bokeh | examples/charts/file/hover_span.py | 6 | 2741 | import pandas as pd
from bokeh.charts import Line, Scatter, show, output_file, defaults
from bokeh.layouts import gridplot
from bokeh.models import HoverTool
from bokeh.sampledata.degrees import data
defaults.width = 500
defaults.height = 300
TOOLS='box_zoom,box_select,hover,crosshair,reset'
TOOLTIPS = [ ("y", "$~y"), ("x", "$~x") ]
data = data[['Biology', 'Business', 'Computer Science', "Year"]]
data = pd.melt(data, id_vars=['Year'],
value_vars=['Biology', 'Business', 'Computer Science'],
value_name='Count', var_name='Degree')
vline = Line(data, y='Count', color='Degree', title="Lines VLine", ylabel='measures',
tools=TOOLS)
hline = Line(data, y='Count', color='Degree', title="Lines HLine",
ylabel='measures', tools=TOOLS)
int_vline = Line(data, y='Count', color='Degree', title="Lines VLine Interp",
ylabel='measures', tools=TOOLS)
int_hline = Line(data, y='Count', color='Degree', title="Lines HLine Interp",
ylabel='measures', tools=TOOLS)
scatter_point = Scatter(data, x='Year', y='Count', color='Degree',
title="Scatter mouse", ylabel='measures', legend=True,
tools=TOOLS)
scatter = Scatter(data, x='Year', y='Count', color='Degree',
title="Scatter V Line", ylabel='measures', legend=True, tools=TOOLS)
int_point_line = Line(data, x='Year', y='Count', color='Degree',
title="Lines Mouse Interp.", ylabel='measures', tools=TOOLS)
point_line = Line(data, x='Year', y='Count', color='Degree',
title="Lines Mouse", ylabel='measures', tools=TOOLS)
hhover = hline.select(HoverTool)
hhover.mode = 'hline'
hhover.line_policy = 'next'
vhover = vline.select(HoverTool)
vhover.mode = 'vline'
vhover.line_policy = 'nearest'
int_hhover = int_hline.select(HoverTool)
int_hhover.mode = 'hline'
int_hhover.line_policy = 'interp'
int_vhover = int_vline.select(HoverTool)
int_vhover.mode = 'vline'
int_vhover.line_policy = 'interp'
iphover = int_point_line.select(HoverTool)
iphover.mode = 'mouse'
iphover.line_policy = 'interp'
tphover = point_line.select(HoverTool)
tphover.mode = 'mouse'
shover = scatter.select(HoverTool)
shover.mode = 'vline'
shoverp = scatter_point.select(HoverTool)
shoverp.mode = 'mouse'
# set up tooltips
int_vhover.tooltips = int_hhover.tooltips = TOOLTIPS
tphover.tooltips = iphover.tooltips = TOOLTIPS
shover.tooltips = shoverp.tooltips = TOOLTIPS
vhover.tooltips = hhover.tooltips = TOOLTIPS
output_file("hover_span.html", title="hover_span.py example")
show(gridplot(hline, vline, int_hline, int_vline,
int_point_line, point_line, scatter_point, scatter,
ncols=2))
| bsd-3-clause |
poryfly/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
BiaDarkia/scikit-learn | sklearn/metrics/pairwise.py | 1 | 47372 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
warn_on_dtype = dtype is not None
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
Y = check_array(Y, accept_sparse='csr', dtype=dtype,
warn_on_dtype=warn_on_dtype, estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[0., 1.],
[1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
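# Illustrative sketch (hypothetical helper): on a small random example the
# dot-product expansion used above agrees with the naive pairwise definition
# up to floating point error.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    naive = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=-1))
    return np.allclose(euclidean_distances(X, Y), naive)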
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples1, n_features)
Array containing points.
Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
Arrays containing points.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
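# Illustrative sketch (hypothetical helper): on a small dense example the
# chunked computation matches a direct argmin/min over the full euclidean
# distance matrix.
def _demo_argmin_min_chunked():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(7, 3), rng.rand(5, 3)
    idx, vals = pairwise_distances_argmin_min(X, Y, batch_size=2)
    D = euclidean_distances(X, Y)
    return (np.array_equal(idx, D.argmin(axis=1)) and
            np.allclose(vals, D.min(axis=1)))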
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=None):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[0., 2.],
[4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[1., 1.],
[1., 1.]])
"""
if size_threshold is not None:
warnings.warn('Use of the "size_threshold" is deprecated '
'in 0.19 and it will be removed in version '
'0.21 of scikit-learn', DeprecationWarning)
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
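# Illustrative sketch (hypothetical helper): for two example rows the cosine
# distance equals one minus their normalized dot product.
def _demo_cosine_distance():
    x = np.array([[1.0, 0.0]])
    y = np.array([[1.0, 1.0]])
    manual = 1.0 - np.dot(x, y.T) / (np.linalg.norm(x) * np.linalg.norm(y))
    return np.allclose(cosine_distances(x, y), manual)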
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
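# Illustrative sketch (hypothetical helper): for L2-normalized rows the paired
# cosine distance equals half the squared paired euclidean distance, as stated
# in the Notes above.
def _demo_paired_cosine_identity():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(6, 4))
    Y = normalize(rng.rand(6, 4))
    return np.allclose(paired_cosine_distances(X, Y),
                       0.5 * paired_euclidean_distances(X, Y) ** 2)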
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([0., 1.])
See also
--------
pairwise_distances : Computes the distance between every pair of samples
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
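# A minimal sketch of the formula above, K(X, Y) = (gamma <X, Y> + coef0)^degree,
# with hand-checkable numbers (gamma=1, coef0=1, degree=2):
# >>> import numpy as np
# >>> X = np.array([[0., 1.], [1., 1.]])
# >>> polynomial_kernel(X, degree=2, gamma=1, coef0=1)
# array([[4., 4.],
# [4., 9.]])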
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
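# A short sketch of rbf_kernel; with gamma=0.5 the off-diagonal entry is
# exp(-0.5 * ||[0, 0] - [1, 1]||^2) = exp(-1), roughly 0.368:
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [1., 1.]])
# >>> rbf_kernel(X, gamma=0.5) # approx. [[1. , 0.368], [0.368, 1. ]]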
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
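# A sketch of the statement above that cosine_similarity on raw data matches
# linear_kernel on L2-normalized data (`normalize` is used elsewhere in this
# module, so it is assumed to be in scope here):
# >>> import numpy as np
# >>> X = np.array([[1., 2.], [3., 4.], [5., 6.]])
# >>> np.allclose(cosine_similarity(X), linear_kernel(normalize(X)))
# True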
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
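# A small sketch with strictly positive histogram rows (so no zero
# denominators); the off-diagonal value is exp(-(0.09/0.9 + 0.09/1.1)),
# roughly 0.83:
# >>> import numpy as np
# >>> X = np.array([[0.6, 0.4], [0.3, 0.7]])
# >>> chi2_kernel(X, gamma=1.) # approx. [[1. , 0.834], [0.834, 1. ]]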
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See also
--------
paired_distances : Computes the distances between corresponding
elements of two arrays
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
X, Y = check_pairwise_arrays(X, Y, dtype=dtype)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
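# A usage sketch for pairwise_distances covering a scikit-learn metric and a
# scipy metric (both assumed available through this module's imports):
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [3., 4.]])
# >>> pairwise_distances(X, metric='euclidean') # [[0., 5.], [5., 0.]]
# >>> pairwise_distances(X, metric='chebyshev') # scipy path: [[0., 4.], [4., 0.]]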
# These distances require boolean arrays when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
filter_params : boolean
Whether to filter invalid parameters or not.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
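# A sketch of pairwise_kernels with filter_params: for metric='rbf' only
# 'gamma' is kept (see KERNEL_PARAMS above), so the extra 'degree' keyword is
# silently dropped instead of being forwarded to rbf_kernel:
# >>> import numpy as np
# >>> X = np.array([[0., 0.], [1., 1.]])
# >>> K = pairwise_kernels(X, metric='rbf', filter_params=True, gamma=0.5, degree=3)
# >>> np.allclose(K, rbf_kernel(X, gamma=0.5))
# True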
| bsd-3-clause |
nickcdryan/hep_ml | hep_ml/uboost.py | 4 | 24323 | """
The module contains an implementation of uBoost algorithm.
The main goal of **uBoost** is to fight correlation between predictions and some variables (i.e. mass of particle).
* `uBoostBDT` is a modified version of AdaBoost that aims to obtain uniform efficiency at the specified level (global efficiency)
* `uBoostClassifier` is a combination of uBoostBDTs for different efficiencies
This implementation is more advanced than the one described in the original paper:
it contains smoothing, trains classifiers in threads, has `learning_rate` and `uniforming_rate` parameters,
does automatic weight renormalization and supports the SAMME.R modification to use predicted probabilities.
Only binary classification is implemented.
See also: :class:`hep_ml.losses.BinFlatnessLossFunction`, :class:`hep_ml.losses.KnnFlatnessLossFunction`,
:class:`hep_ml.losses.KnnAdaLossFunction`
to fight correlation.
Examples
________
To get uniform prediction in mass for background:
>>> base_tree = DecisionTreeClassifier(max_depth=3)
>>> clf = uBoostClassifier(uniform_features=['mass'], uniform_label=0, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
To get uniform prediction in Dalitz variables for signal
>>> clf = uBoostClassifier(uniform_features=['mass_12', 'mass_23'], uniform_label=1, base_estimator=base_tree,
>>> train_features=['pt', 'flight_time'])
>>> clf.fit(train_data, train_labels, sample_weight=train_weights)
>>> proba = clf.predict_proba(test_data)
"""
# Authors:
# Alex Rogozhnikov <[email protected]>
# Nikita Kazeev <[email protected]>
from six.moves import zip
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.random import check_random_state
from .commonutils import sigmoid_function, map_on_cluster, \
compute_knn_indices_of_same_class, compute_cut_for_efficiency, check_xyw
from . import commonutils
from .metrics_utils import compute_group_efficiencies_by_indices
__author__ = "Alex Rogozhnikov, Nikita Kazeev"
__all__ = ["uBoostBDT", "uBoostClassifier"]
class uBoostBDT(BaseEstimator, ClassifierMixin):
def __init__(self,
uniform_features,
uniform_label,
target_efficiency=0.5,
n_neighbors=50,
subsample=1.0,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
uniforming_rate=1.,
train_features=None,
smoothing=0.0,
random_state=None,
algorithm="SAMME"):
"""
uBoostBDT is AdaBoostClassifier, which is modified to have flat
efficiency of signal (class=1) along some variables.
Efficiency is only guaranteed at the cut,
corresponding to global efficiency == target_efficiency.
Can be used alone, without uBoostClassifier.
:param uniform_features: list of strings, names of variables, along which
flatness is desired
:param uniform_label: int, label of class on which uniformity is desired
(typically 0 for background, 1 for signal).
:param target_efficiency: float, the flatness is obtained at global BDT cut,
corresponding to global efficiency
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
:param subsample: float (default=1.0), part of training dataset used
to build each base estimator.
:param base_estimator: classifier, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
`classes_` and `n_classes_` attributes.
:param n_estimators: integer, optional (default=50)
number of estimators used.
:param learning_rate: float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate``
and ``n_estimators``.
:param uniforming_rate: float, optional (default=1.)
how much do we take into account the uniformity of signal,
there is a trade-off between uniforming_rate and the speed of
uniforming, zero value corresponds to plain AdaBoost
:param train_features: list of strings, names of variables used in
fit/predict. If None, all the variables are used
(including uniform_features)
:param smoothing: float, (default=0.), used to smooth computing of local
efficiencies, 0.0 corresponds to usual uBoost
:param random_state: int, RandomState instance or None (default None)
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method for
producing uniform selection efficiencies from multivariate classifiers'
"""
self.base_estimator = base_estimator
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.uniforming_rate = uniforming_rate
self.uniform_features = uniform_features
self.target_efficiency = target_efficiency
self.n_neighbors = n_neighbors
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.uniform_label = uniform_label
self.random_state = random_state
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None, neighbours_matrix=None):
"""Build a boosted classifier from the training set (X, y).
:param X: array-like of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
:param sample_weight: array-like of shape [n_samples] or None
:param neighbours_matrix: array-like of shape [n_samples, n_neighbours],
each row contains indices of signal neighbours
(neighbours should be computed for background too),
if None, this matrix is computed.
:return: self
"""
if self.smoothing < 0:
raise ValueError("Smoothing must be non-negative")
if not isinstance(self.base_estimator, BaseEstimator):
raise TypeError("estimator must be a subclass of BaseEstimator")
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than zero.")
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if self.base_estimator is None:
self.base_estimator = DecisionTreeClassifier(max_depth=2)
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator, 'predict_proba'):
raise TypeError(
"uBoostBDT with algorithm='SAMME.R' requires "
"that the weak learner have a predict_proba method.\n"
"Please change the base estimator or set algorithm='SAMME' instead.")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented, with labels 0 and 1"
self.signed_uniform_label = 2 * self.uniform_label - 1
if neighbours_matrix is not None:
assert np.shape(neighbours_matrix) == (len(X), self.n_neighbors), \
"Wrong shape of neighbours_matrix"
self.knn_indices = neighbours_matrix
else:
assert self.uniform_features is not None, \
"uniform_variables should be set"
self.knn_indices = compute_knn_indices_of_same_class(
X.ix[:, self.uniform_features], y, self.n_neighbors)
sample_weight = commonutils.check_sample_weight(y, sample_weight=sample_weight, normalize=True)
assert np.all(sample_weight >= 0.), 'the weights should be non-negative'
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = []
# score cuts correspond to
# global efficiency == target_efficiency on each iteration.
self.score_cuts_ = []
x_train_features = self._get_train_features(X)
x_train_features, y, sample_weight = check_xyw(x_train_features, y, sample_weight)
self.random_state_ = check_random_state(self.random_state)
self._boost(x_train_features, y, sample_weight)
self.score_cut = self.signed_uniform_label * compute_cut_for_efficiency(
self.target_efficiency, y == self.uniform_label, self.decision_function(X) * self.signed_uniform_label)
assert np.allclose(self.score_cut, self.score_cuts_[-1], rtol=1e-10, atol=1e-10), \
"score cut doesn't appear to coincide with the staged one"
assert len(self.estimators_) == len(self.estimator_weights_) == len(self.score_cuts_)
return self
def _make_estimator(self):
estimator = clone(self.base_estimator)
# self.estimators_.append(estimator)
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
return estimator
def _estimator_score(self, estimator, X):
if self.algorithm == "SAMME":
return 2 * estimator.predict(X) - 1.
else:
p = estimator.predict_proba(X)
p[p <= 1e-5] = 1e-5
return np.log(p[:, 1] / p[:, 0])
@staticmethod
def _normalize_weight(y, weight):
# frequently the algorithm assigns very large weights to signal events
# compared to background ones (or vice versa, if we want to be uniform in background)
return commonutils.check_sample_weight(y, sample_weight=weight, normalize=True, normalize_by_class=True)
def _compute_uboost_multipliers(self, sample_weight, score, y):
"""Returns uBoost multipliers to sample_weight and computed global cut"""
signed_score = score * self.signed_uniform_label
signed_score_cut = compute_cut_for_efficiency(self.target_efficiency, y == self.uniform_label, signed_score)
global_score_cut = signed_score_cut * self.signed_uniform_label
local_efficiencies = compute_group_efficiencies_by_indices(signed_score, self.knn_indices, cut=signed_score_cut,
smoothing=self.smoothing)
# pay attention - sample_weight should be used only here
e_prime = np.average(np.abs(local_efficiencies - self.target_efficiency),
weights=sample_weight)
is_uniform_class = (y == self.uniform_label)
# beta = np.log((1.0 - e_prime) / e_prime)
# changed to log(1. / e_prime), otherwise this can lead to the situation
# where beta is negative (which is a disaster).
# Mike (uboost author) said he didn't take that into account.
beta = np.log(1. / e_prime)
boost_weights = np.exp((self.target_efficiency - local_efficiencies) * is_uniform_class *
(beta * self.uniforming_rate))
return boost_weights, global_score_cut
def _boost(self, X, y, sample_weight):
"""Implement a single boost using the SAMME or SAMME.R algorithm,
which is modified in uBoost way"""
cumulative_score = np.zeros(len(X))
y_signed = 2 * y - 1
for iteration in range(self.n_estimators):
estimator = self._make_estimator()
mask = _generate_subsample_mask(len(X), self.subsample, self.random_state_)
estimator.fit(X[mask], y[mask], sample_weight=sample_weight[mask])
# computing estimator weight
if self.algorithm == 'SAMME':
y_pred = estimator.predict(X)
# Error fraction
estimator_error = np.average(y_pred != y, weights=sample_weight)
estimator_error = np.clip(estimator_error, 1e-6, 1. - 1e-6)
estimator_weight = self.learning_rate * 0.5 * (
np.log((1. - estimator_error) / estimator_error))
score = estimator_weight * (2 * y_pred - 1)
else:
estimator_weight = self.learning_rate * 0.5
score = estimator_weight * self._estimator_score(estimator, X)
# correcting the weights and score according to predictions
sample_weight *= np.exp(- y_signed * score)
sample_weight = self._normalize_weight(y, sample_weight)
cumulative_score += score
uboost_multipliers, global_score_cut = \
self._compute_uboost_multipliers(sample_weight, cumulative_score, y)
sample_weight *= uboost_multipliers
sample_weight = self._normalize_weight(y, sample_weight)
self.score_cuts_.append(global_score_cut)
self.estimators_.append(estimator)
self.estimator_weights_.append(estimator_weight)
# erasing from memory
self.knn_indices = None
def _get_train_features(self, X):
"""Gets the DataFrame and returns only columns
that should be used in fitting / predictions"""
if self.train_features is None:
return X
else:
return X[self.train_features]
def staged_decision_function(self, X):
"""Decision function after each stage of boosting.
Float for each sample; the greater the value, the more signal-like the event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats.
"""
X = self._get_train_features(X)
score = np.zeros(len(X))
for classifier, weight in zip(self.estimators_, self.estimator_weights_):
score += self._estimator_score(classifier, X) * weight
yield score
def decision_function(self, X):
"""Decision function. Float for each sample, the greater --- the more signal like event is.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with floats
"""
return commonutils.take_last(self.staged_decision_function(X))
def predict(self, X):
"""Predict classes for each sample
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples] with predicted classes.
"""
return np.array(self.decision_function(X) > self.score_cut, dtype=int)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
return commonutils.score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
for score in self.staged_decision_function(X):
yield commonutils.score_to_proba(score)
def _uboost_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
return sigmoid_function(self.decision_function(X) - self.score_cut,
self.smoothing)
def _uboost_staged_predict_score(self, X):
"""Method added specially for uBoostClassifier"""
for cut, score in zip(self.score_cuts_, self.staged_decision_function(X)):
yield sigmoid_function(score - cut, self.smoothing)
@property
def feature_importances_(self):
"""Return the feature importances for `train_features`.
:return: array of shape [n_features], the order is the same as in `train_features`
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted,"
" call `fit` before `feature_importances_`.")
return sum(tree.feature_importances_ * weight for tree, weight
in zip(self.estimators_, self.estimator_weights_))
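# A minimal usage sketch for uBoostBDT on its own (mirroring the module-level
# example above; `train_data`, `train_labels`, `train_weights` and `test_data`
# are placeholder pandas objects, with a 'mass' column present in train_data):
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> bdt = uBoostBDT(uniform_features=['mass'], uniform_label=0,
# >>> base_estimator=DecisionTreeClassifier(max_depth=2),
# >>> target_efficiency=0.5, n_estimators=50)
# >>> bdt.fit(train_data, train_labels, sample_weight=train_weights)
# >>> proba = bdt.predict_proba(test_data)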
def _train_classifier(classifier, X_train_vars, y, sample_weight, neighbours_matrix):
# supplementary function to train separate parts of uBoost on cluster
return classifier.fit(X_train_vars, y,
sample_weight=sample_weight,
neighbours_matrix=neighbours_matrix)
class uBoostClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, uniform_features,
uniform_label,
train_features=None,
n_neighbors=50,
efficiency_steps=20,
n_estimators=40,
base_estimator=None,
subsample=1.0,
algorithm="SAMME",
smoothing=None,
n_threads=1,
random_state=None):
"""uBoost classifier, an algorithm of boosting targeted to obtain
flat efficiency in signal along some variables (e.g. mass).
In principle, uBoost is ensemble of uBoostBDTs. See [1] for details.
Parameters
----------
:param uniform_features: list of strings, names of variables,
along which flatness is desired
:param uniform_label: int,
the label of the class for which uniformity is desired
:param train_features: list of strings,
names of variables used in fit/predict.
if None, all the variables are used (including uniform_features)
:param n_neighbors: int, (default=50) the number of neighbours,
which are used to compute local efficiency
:param n_estimators: integer, optional (default=40)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
:param efficiency_steps: integer, optional (default=20),
How many uBoostBDTs should be trained
(each with its own target_efficiency)
:param base_estimator: object, optional (default=DecisionTreeClassifier(max_depth=2))
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required,
as well as proper `classes_` and `n_classes_` attributes.
:param subsample: float (default =1.) part of training dataset used
to train each base classifier.
:param smoothing: float, default=None, used to smooth computing of
local efficiencies, 0.0 corresponds to usual uBoost,
:param random_state: int, RandomState instance or None, (default=None)
:param n_threads: int, number of threads used.
Reference
----------
.. [1] J. Stevens, M. Williams 'uBoost: A boosting method
for producing uniform selection efficiencies from multivariate classifiers'
"""
self.uniform_features = uniform_features
self.uniform_label = uniform_label
self.knn = n_neighbors
self.efficiency_steps = efficiency_steps
self.random_state = random_state
self.n_estimators = n_estimators
self.base_estimator = base_estimator
self.subsample = subsample
self.train_features = train_features
self.smoothing = smoothing
self.n_threads = n_threads
self.algorithm = algorithm
def _get_train_features(self, X):
if self.train_features is not None:
return X[self.train_features]
else:
return X
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:param y: labels, array of shape [n_samples] with 0 and 1.
The target values (integers that correspond to classes).
:param sample_weight: array-like of shape [n_samples] with weights or None
:return: self
"""
if self.uniform_features is None:
raise ValueError("Please set uniform variables")
if len(self.uniform_features) == 0:
raise ValueError("The set of uniform variables cannot be empty")
assert np.in1d(y, [0, 1]).all(), \
"only two-class classification is implemented"
if self.base_estimator is None:
self.base_estimator = DecisionTreeClassifier(max_depth=2)
X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight, classification=True)
data_train_features = self._get_train_features(X)
if self.smoothing is None:
self.smoothing = 10. / self.efficiency_steps
neighbours_matrix = compute_knn_indices_of_same_class(
X[self.uniform_features], y, n_neighbours=self.knn)
self.target_efficiencies = np.linspace(0, 1, self.efficiency_steps + 2)[1:-1]
self.classifiers = []
for efficiency in self.target_efficiencies:
classifier = uBoostBDT(
uniform_features=self.uniform_features,
uniform_label=self.uniform_label,
train_features=None,
target_efficiency=efficiency, n_neighbors=self.knn,
n_estimators=self.n_estimators,
base_estimator=self.base_estimator,
random_state=self.random_state, subsample=self.subsample,
smoothing=self.smoothing, algorithm=self.algorithm)
self.classifiers.append(classifier)
self.classifiers = map_on_cluster('threads-{}'.format(self.n_threads),
_train_classifier,
self.classifiers,
self.efficiency_steps * [data_train_features],
self.efficiency_steps * [y],
self.efficiency_steps * [sample_weight],
self.efficiency_steps * [neighbours_matrix])
return self
def predict(self, X):
"""Predict labels
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: numpy.array of shape [n_samples]
"""
return self.predict_proba(X).argmax(axis=1)
def predict_proba(self, X):
"""Predict probabilities
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: array of shape [n_samples, n_classes] with probabilities.
"""
X = self._get_train_features(X)
score = sum(clf._uboost_predict_score(X) for clf in self.classifiers)
return commonutils.score_to_proba(score / self.efficiency_steps)
def staged_predict_proba(self, X):
"""Predicted probabilities for each sample after each stage of boosting.
:param X: data, pandas.DataFrame of shape [n_samples, n_features]
:return: sequence of numpy.arrays of shape [n_samples, n_classes]
"""
X = self._get_train_features(X)
for scores in zip(*[clf._uboost_staged_predict_score(X) for clf in self.classifiers]):
yield commonutils.score_to_proba(sum(scores) / self.efficiency_steps)
def _generate_subsample_mask(n_samples, subsample, random_generator):
"""
:param float subsample: part of samples to be left
:param random_generator: numpy.random.RandomState instance
"""
assert 0 < subsample <= 1., 'subsample should be in range (0, 1]'
if subsample == 1.0:
mask = slice(None, None, None)
else:
mask = random_generator.uniform(size=n_samples) < subsample
return mask | apache-2.0 |
DGrady/pandas | pandas/tests/io/msgpack/test_seq.py | 14 | 1171 | # coding: utf-8
import io
import pandas.io.msgpack as msgpack
binarydata = bytes(bytearray(range(256)))
def gen_binary_data(idx):
return binarydata[:idx % 300]
def test_exceeding_unpacker_read_size():
dumpf = io.BytesIO()
packer = msgpack.Packer()
NUMBER_OF_STRINGS = 6
read_size = 16
# 5 ok for read_size=16, while 6 glibc detected *** python: double free or
# corruption (fasttop):
# 20 ok for read_size=256, while 25 segfaults / glibc detected *** python:
# double free or corruption (!prev)
# 40 ok for read_size=1024, while 50 introduces errors
# 7000 ok for read_size=1024*1024, while 8000 leads to glibc detected ***
# python: double free or corruption (!prev):
for idx in range(NUMBER_OF_STRINGS):
data = gen_binary_data(idx)
dumpf.write(packer.pack(data))
f = io.BytesIO(dumpf.getvalue())
dumpf.close()
unpacker = msgpack.Unpacker(f, read_size=read_size, use_list=1)
read_count = 0
for idx, o in enumerate(unpacker):
assert type(o) == bytes
assert o == gen_binary_data(idx)
read_count += 1
assert read_count == NUMBER_OF_STRINGS
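# A minimal round-trip sketch with the same Packer/Unpacker API exercised by
# the test above (return values of write/seek are ignored for brevity):
# >>> buf = io.BytesIO()
# >>> _ = buf.write(msgpack.Packer().pack(b'hello'))
# >>> _ = buf.seek(0)
# >>> list(msgpack.Unpacker(buf, use_list=1))
# [b'hello']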
| bsd-3-clause |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-12_G5_BUF2_AR1_b2_legacy.py | 1 | 4372 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-12_G5_BUF2_AR1_b2.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.2
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/linear_model/_bayes.py | 2 | 26806 | """
Various bayesian regression
"""
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from ._base import LinearModel, _rescale_data
from ..base import RegressorMixin
from ._base import _deprecate_normalize
from ..utils.extmath import fast_logdet
from scipy.linalg import pinvh
from ..utils.validation import _check_sample_weight
###############################################################################
# BayesianRidge regression
class BayesianRidge(RegressorMixin, LinearModel):
"""Bayesian ridge regression.
Fit a Bayesian ridge model. See the Notes section for details on this
implementation and the optimization of the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations. Should be greater than or equal to 1.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
alpha_init : float, default=None
Initial value for alpha (precision of the noise).
If not set, alpha_init is 1/Var(y).
.. versionadded:: 0.22
lambda_init : float, default=None
Initial value for lambda (precision of the weights).
If not set, lambda_init is 1.
.. versionadded:: 0.22
compute_score : bool, default=False
If True, compute the log marginal likelihood at each iteration of the
optimization.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model.
The intercept is not treated as a probabilistic parameter
and thus has no associated variance. If set
to False, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array-like of shape (n_features,)
Coefficients of the regression model (mean of distribution)
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated precision of the noise.
lambda_ : float
Estimated precision of the weights.
sigma_ : array-like of shape (n_features, n_features)
Estimated variance-covariance matrix of the weights
scores_ : array-like of shape (n_iter_+1,)
If computed_score is True, value of the log marginal likelihood (to be
maximized) at each iteration of the optimization. The array starts
with the value of the log marginal likelihood obtained for the initial
values of alpha and lambda and ends with the value obtained for the
estimated alpha and lambda.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
X_offset_ : float
If `normalize=True`, offset subtracted for centering data to a
zero mean.
X_scale_ : float
If `normalize=True`, parameter used to scale data to a unit
standard deviation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
BayesianRidge()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
There exist several strategies to perform Bayesian ridge regression. This
implementation is based on the algorithm described in Appendix A of
(Tipping, 2001) where updates of the regularization parameters are done as
suggested in (MacKay, 1992). Note that according to A New
View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these
update rules do not guarantee that the marginal likelihood is increasing
between two consecutive iterations of the optimization.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine,
Journal of Machine Learning Research, Vol. 1, 2001.
"""
def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, alpha_init=None,
lambda_init=None, compute_score=False, fit_intercept=True,
normalize='deprecated', copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.alpha_init = alpha_init
self.lambda_init = lambda_init
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the model
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary
sample_weight : ndarray of shape (n_samples,), default=None
Individual weights for each sample
.. versionadded:: 0.20
parameter *sample_weight* support to BayesianRidge.
Returns
-------
self : returns an instance of self.
"""
self._normalize = _deprecate_normalize(
self.normalize, default=False,
estimator_name=self.__class__.__name__
)
if self.n_iter < 1:
raise ValueError('n_iter should be greater than or equal to 1.'
' Got {!r}.'.format(self.n_iter))
X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self._normalize, self.copy_X,
sample_weight=sample_weight)
if sample_weight is not None:
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to avoid division by zero if `np.var(y)`
# is zero
alpha_ = self.alpha_init
lambda_ = self.lambda_init
if alpha_ is None:
alpha_ = 1. / (np.var(y) + eps)
if lambda_ is None:
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# update posterior mean coef_ based on alpha_ and lambda_ and
# compute corresponding rmse
coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
# Update alpha and lambda according to (MacKay, 1992)
gamma_ = np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.n_iter_ = iter_ + 1
# return regularization parameters and corresponding posterior mean,
# log marginal likelihood and posterior covariance
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_, rmse_ = self._update_coef_(X, y, n_samples, n_features,
XT_y, U, Vh, eigen_vals_,
alpha_, lambda_)
if self.compute_score:
# compute the log marginal likelihood
s = self._log_marginal_likelihood(n_samples, n_features,
eigen_vals_,
alpha_, lambda_,
coef_, rmse_)
self.scores_.append(s)
self.scores_ = np.array(self.scores_)
# posterior covariance is given by 1/alpha_ * scaled_sigma_
scaled_sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * scaled_sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self._normalize:
X = (X - self.X_offset_) / self.X_scale_
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
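# A short sketch of the return_std branch above, reusing the tiny dataset from
# the class docstring (the exact std value depends on the fitted alpha_ and
# sigma_, so it is not spelled out here):
# >>> clf = BayesianRidge().fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
# >>> y_mean, y_std = clf.predict([[1, 1]], return_std=True)
# >>> y_mean # approx. array([1.])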
def _update_coef_(self, X, y, n_samples, n_features, XT_y, U, Vh,
eigen_vals_, alpha_, lambda_):
"""Update posterior mean and compute corresponding rmse.
Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where
scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features)
+ np.dot(X.T, X))^-1
"""
if n_samples > n_features:
coef_ = np.linalg.multi_dot([Vh.T,
Vh / (eigen_vals_ + lambda_ /
alpha_)[:, np.newaxis],
XT_y])
else:
coef_ = np.linalg.multi_dot([X.T,
U / (eigen_vals_ + lambda_ /
alpha_)[None, :],
U.T, y])
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
return coef_, rmse_
def _log_marginal_likelihood(self, n_samples, n_features, eigen_vals,
alpha_, lambda_, coef, rmse):
"""Log marginal likelihood."""
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
# compute the log of the determinant of the posterior covariance.
# posterior covariance is given by
# sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1
if n_samples > n_features:
logdet_sigma = - np.sum(np.log(lambda_ + alpha_ * eigen_vals))
else:
logdet_sigma = np.full(n_features, lambda_,
dtype=np.array(lambda_).dtype)
logdet_sigma[:n_samples] += alpha_ * eigen_vals
logdet_sigma = - np.sum(np.log(logdet_sigma))
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
score += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse -
lambda_ * np.sum(coef ** 2) +
logdet_sigma -
n_samples * log(2 * np.pi))
return score
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(RegressorMixin, LinearModel):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (evidence maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, default=300
Maximum number of iterations.
tol : float, default=1e-3
Stop the algorithm if w has converged.
alpha_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter.
alpha_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
lambda_1 : float, default=1e-6
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter.
lambda_2 : float, default=1e-6
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
compute_score : bool, default=False
If True, compute the objective function at each step of the model.
    threshold_lambda : float, default=1e+4
        Threshold for removing (pruning) weights with high precision from
        the computation.
    fit_intercept : bool, default=True
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
.. deprecated:: 1.0
``normalize`` was deprecated in version 1.0 and will be removed in
1.2.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
verbose : bool, default=False
Verbose mode when fitting the model.
Attributes
----------
    coef_ : array-like of shape (n_features,)
        Coefficients of the regression model (mean of distribution).
    alpha_ : float
        Estimated precision of the noise.
    lambda_ : array-like of shape (n_features,)
        Estimated precisions of the weights.
    sigma_ : array-like of shape (n_features, n_features)
        Estimated variance-covariance matrix of the weights.
    scores_ : list of float
        If computed, values of the objective function (to be maximized)
        at each iteration.
intercept_ : float
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
    X_offset_ : ndarray of shape (n_features,)
        If `normalize=True`, offset subtracted for centering data to a
        zero mean.
    X_scale_ : ndarray of shape (n_features,)
        If `normalize=True`, parameter used to scale data to a unit
        standard deviation.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
ARDRegression()
>>> clf.predict([[1, 1]])
array([1.])
Notes
-----
For an example, see :ref:`examples/linear_model/plot_ard.py
<sphx_glr_auto_examples_linear_model_plot_ard.py>`.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our ``self.alpha_``
Their alpha is our ``self.lambda_``
    ARD is a little different from the slides: only dimensions/features for
which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are
discarded.
"""
def __init__(self, *, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True,
normalize='deprecated', copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
            Target values. Will be cast to X's dtype if necessary.
Returns
-------
        self : object
            Fitted estimator.
"""
self._normalize = _deprecate_normalize(
self.normalize, default=False,
estimator_name=self.__class__.__name__
)
X, y = self._validate_data(X, y, dtype=np.float64, y_numeric=True,
ensure_min_samples=2)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self._normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
eps = np.finfo(np.float64).eps
# Add `eps` in the denominator to omit division by zero if `np.var(y)`
# is zero
alpha_ = 1. / (np.var(y) + eps)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
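        # Posterior mean of the kept (non-pruned) weights for the current
        # hyper-parameters: coef_[keep] = alpha_ * sigma_ @ X_keep.T @ y.
        # Pruned coefficients are left at zero.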
def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_):
coef_[keep_lambda] = alpha_ * np.linalg.multi_dot([
sigma_, X[:, keep_lambda].T, y])
return coef_
update_sigma = (self._update_sigma if n_samples >= n_features
else self._update_sigma_woodbury)
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
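            # gamma_[i] = 1 - lambda_[i] * sigma_[i, i] measures how well
            # weight i is determined by the data (MacKay's effective number of
            # well-determined parameters); it drives the evidence-maximization
            # updates of lambda_ and alpha_ below.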
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
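            # Same evidence-plus-Gamma-prior score as in BayesianRidge, except
            # that lambda_ is a per-feature vector here, hence the sums over
            # features.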
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
if not keep_lambda.any():
break
if keep_lambda.any():
# update sigma and mu using updated params from the last iteration
sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda)
coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_)
else:
sigma_ = np.array([]).reshape(0, 0)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples < n_features and will invert
# a matrix of shape (n_samples, n_samples) making use of the
# woodbury formula:
# https://en.wikipedia.org/wiki/Woodbury_matrix_identity
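        # With A = diag(lambda_[keep_lambda]), the posterior covariance
        #   sigma_ = (A + alpha_ * X_keep.T @ X_keep)^-1
        # is expanded via the Woodbury identity as
        #   A^-1 - A^-1 @ X_keep.T
        #         @ (I / alpha_ + X_keep @ A^-1 @ X_keep.T)^-1 @ X_keep @ A^-1
        # which only requires inverting an (n_samples, n_samples) matrix.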
n_samples = X.shape[0]
X_keep = X[:, keep_lambda]
inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1)
sigma_ = pinvh(
np.eye(n_samples) / alpha_ + np.dot(X_keep * inv_lambda, X_keep.T)
)
sigma_ = np.dot(sigma_, X_keep * inv_lambda)
sigma_ = - np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_)
sigma_[np.diag_indices(sigma_.shape[1])] += 1. / lambda_[keep_lambda]
return sigma_
def _update_sigma(self, X, alpha_, lambda_, keep_lambda):
# See slides as referenced in the docstring note
# this function is used when n_samples >= n_features and will
# invert a matrix of shape (n_features, n_features)
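        # Posterior precision restricted to the kept features is inverted
        # directly: sigma_ = (diag(lambda_[keep]) + alpha_ * X_keep.T @ X_keep)^-1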
X_keep = X[:, keep_lambda]
gram = np.dot(X_keep.T, X_keep)
eye = np.eye(gram.shape[0])
sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram
sigma_ = pinvh(sigma_inv)
return sigma_
def predict(self, X, return_std=False):
"""Predict using the linear model.
        In addition to the mean of the predictive distribution, its standard
        deviation can also be returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Samples.
return_std : bool, default=False
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array-like of shape (n_samples,)
Mean of predictive distribution of query points.
y_std : array-like of shape (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self._normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
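            # sigma_ only covers the kept (non-pruned) features, so X is
            # restricted to the same columns before computing the per-sample
            # variance diag(X @ sigma_ @ X.T) + 1 / alpha_.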
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
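

# ---------------------------------------------------------------------------
# Illustrative usage sketch added for exposition; it is not part of the
# original module. It fits ARDRegression on a small synthetic problem with
# sparse true coefficients and queries the predictive mean and standard
# deviation. ``X_demo``, ``y_demo`` and ``true_w`` are made-up names; ``np``
# is assumed to be imported at the top of this module, as it is used above.
# Because the module uses relative imports, run it with ``-m`` (for example
# ``python -m sklearn.linear_model._bayes``, assuming that is this file's
# location) rather than as a plain script.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(60, 10)
    true_w = np.zeros(10)
    true_w[:3] = [1.5, -2.0, 0.5]            # only three informative features
    y_demo = X_demo @ true_w + 0.1 * rng.randn(60)
    ard = ARDRegression(compute_score=True)
    ard.fit(X_demo, y_demo)
    y_mean, y_std = ard.predict(X_demo[:5], return_std=True)
    n_pruned = int(np.sum(ard.lambda_ >= ard.threshold_lambda))
    print("estimated coefficients:", np.round(ard.coef_, 3))
    print("features pruned by threshold_lambda:", n_pruned)
    print("predictive mean:", np.round(y_mean, 3))
    print("predictive std :", np.round(y_std, 3))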
| bsd-3-clause |