repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Ledoux/ShareYourSystem | Pythonlogy/ShareYourSystem/Standards/Viewers/Pyploter/11_ExampleDoc.py | 2 | 2392 |
#ImportModules
import ShareYourSystem as SYS
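#Note: each ('plot', {'#liarg': [...], '#kwarg': {...}}) entry below appears to be
#forwarded to matplotlib, i.e. roughly equivalent to a call such as
#plt.plot([1, 2, 3], [0, 1, 2], linestyle="", marker='o'). This mapping is an
#assumption inferred from the dict structure, not from ShareYourSystem documentation.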
#figure
MyPyploter=SYS.PyploterClass(
).mapSet(
{
'-Panels':
[
('|A',
{
'-Charts':
[
('|a',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[0,1,2]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}),
('|1',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[0,1,2],
[2,1,0]
],
'#kwarg':{
'linestyle':"--",
'color':'r'
}
}
)
],
})
]
}),
('|b',{
'PyplotingShiftVariable':[2,0],
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[1,1,1]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
})
]
}
),
('|B',
{
'PyplotingShiftVariable':[0,3],
'-Charts':
[
('|a',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[4,2,4]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
}),('|b',{
'-Draws':[
('|0',{
'PyplotingDrawVariable':
[
(
'plot',
{
'#liarg':[
[1,2,3],
[0,10,1]
],
'#kwarg':{
'linestyle':"-",
'marker':'o'
}
}
)
]
})
]
})
]
}
)
]
}
).pyplot(
)
#print
print('MyPyploter is ')
SYS._print(MyPyploter)
#show
SYS.matplotlib.pyplot.show()
"""
"""
"""
""" | mit |
dmnfarrell/epitopepredict | epitopepredict/tests.py | 2 | 5628 | #!/usr/bin/env python
"""
MHC prediction unit tests
Created September 2015
Copyright (C) Damien Farrell
"""
from __future__ import absolute_import, print_function
import sys, os
import pandas as pd
import unittest
from . import base, analysis, sequtils, peptutils, mhclearn
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
path = os.path.dirname(os.path.abspath(__file__))
testdir = os.path.join(path, 'testing')
datadir = os.path.join(path, 'mhcdata')
class PredictorTests(unittest.TestCase):
"""Basic tests for predictor"""
def setUp(self):
self.m2alleles = base.get_preset_alleles('mhc2_supertypes')
self.peptides = peptutils.create_random_sequences(50)
self.genbankfile = os.path.join(testdir, 'zaire-ebolavirus.gb')
self.fastafile = os.path.join(testdir, 'zaire-ebolavirus.faa')
self.df = sequtils.genbank_to_dataframe(self.genbankfile, cds=True)
self.testdir = testdir
if not os.path.exists(self.testdir):
os.mkdir(self.testdir)
return
def test_peptide_utils(self):
s = peptutils.create_random_sequences(100)
print (s)
seq = 'MTDDPGSGFTTVWNAVVSELNGDPKVDDGP'
f = peptutils.get_fragments(seq=seq)
print (f)
return
def test_classes(self):
cl = base.get_predictor_classes()
for c in cl:
P=base.get_predictor(c)
print (P)
def test_cutoffs(self):
cl = base.get_predictor_classes()
for c in cl:
P=base.get_predictor(c)
P.get_allele_cutoffs()
def test_basicmhc1(self):
P=base.get_predictor('basicmhc1')
print (P)
allele='HLA-A*01:01'
data = mhclearn.get_training_set(allele)
peps = list(data.peptide)
P.predict_peptides(peps[:50],alleles=allele)
return
def test_tepitope(self):
"""Tepitope test"""
df = self.df
P = base.get_predictor('tepitope')
alleles = ["HLA-DRB1*0101", "HLA-DRB1*0305"]
print (P)
P.predict_proteins(df, length=11, alleles=alleles,
path=self.testdir)
P.get_binders(data=P.data)
return
def test_netmhcpan(self):
"""netMHCpan test"""
#requires that netMHCpan is installed
df = self.df
P = base.get_predictor('netmhcpan')
print (P)
seqs = peptutils.create_random_sequences(10)
P.predict_peptides(seqs, alleles=['HLA-A*02:02'], threads=1)
print (len(P.data))
return
'''def test_netmhciipan(self):
"""netMHCIIpan test"""
#requires that netMHCIIpan is installed
df = self.df
P = base.get_predictor('netmhciipan')
alleles = ["HLA-DRB1*0101"]
names = ['ZEBOVgp1']
print (P)
P.predictProteins(df, length=11, alleles=alleles, names=names,
path=self.testdir)
P.getBinders(data=P.data)
return'''
'''def test_iedbmhc1(self):
"""IEDB MHCI test"""
df = self.df
P = base.get_predictor('iedbmhc1')
base.iedbmhc1path = '/local/iedbmhc1'
print (P)
if not os.path.exists(base.iedbmhc1path):
print ('IEDB MHC-I not found')
return
alleles = ["HLA-A*02:02", "HLA-A*01:01"]
for m in P.methods:
if m == 'comblib_sidney2008': continue
print (P.name, m)
P.predict_proteins(df, length=8, alleles=alleles,
method=m)
b = P.get_binders(data=P.data, cutoff=5, cutoff_method='rank')
print ('%s binders' %len(b))
return'''
def test_peptide_prediction(self):
m2alleles = base.get_preset_alleles('mhc2_supertypes')
P = base.get_predictor('tepitope')
x = P.predict_peptides(self.peptides, alleles=self.m2alleles)
return
def test_multiproc(self):
P = base.get_predictor('tepitope')
x = P.predict_peptides(self.peptides, alleles=self.m2alleles, threads=2)
return
def test_fasta(self):
"""Test fasta predictions"""
df = sequtils.fasta_to_dataframe(self.fastafile)
alleles = ["HLA-DRB1*0101"]
P = base.get_predictor('tepitope')
P.predict_proteins(df, length=11, alleles=alleles, path=self.testdir)
return
def test_load(self):
"""Test re-loading predictions"""
infile = os.path.join(self.testdir, 'ZEBOVgp1.csv')
P = base.get_predictor('iedbmhc1')
P.load(infile)
return
def test_features(self):
"""Test genbank feature handling"""
df = sequtils.fasta_to_dataframe(self.fastafile)
name = 'ZEBOVgp1'
sequtils.dataframe_to_fasta(df)
sequtils.check_tags(df)
return
def test_mhcflurry(self):
"""Test mhcflurry predictor"""
P = base.get_predictor('mhcflurry')
print (P)
seqs = peptutils.create_random_sequences(10)
P.predict_peptides(seqs, alleles=['HLA-A*02:02'], threads=1)
print (len(P.data))
return
def test_iedbmhc1(self):
"""iedbmhc1 test"""
df = self.df
P = base.get_predictor('iedbmhc1')
if P.check_install() == 0:
return
seqs = peptutils.create_random_sequences(10, length=11)
P.predict_peptides(seqs, alleles=['HLA-A*02:02'], threads=1)
print (len(P.data))
return
def quit(self):
self.app.quit()
def run():
unittest.main()
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
xzh86/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
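# Descriptive note: this context manager temporarily redirects stdout/stderr to
# os.devnull; it is used by test_verbosity below to silence the output of
# TheilSenRegressor(verbose=True) while still exercising the verbose code path.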
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
msbeta/apollo | modules/tools/mapshow/libs/plot_st.py | 3 | 1978 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from cyber_py import cyber
from planning import Planning
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from subplot_st_main import StMainSubplot
from subplot_st_speed import StSpeedSubplot
from modules.planning.proto import planning_pb2
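# Flow of this tool (summarized from the code below): add_listener() subscribes to
# the /apollo/planning channel; each received ADCTrajectory message updates the
# shared `planning` object via planning_callback(), and FuncAnimation invokes
# update() every 100 ms to redraw the ST main and ST speed subplots.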
planning = Planning()
def update(frame_number):
st_main_subplot.show(planning)
st_speed_subplot.show(planning)
def planning_callback(planning_pb):
planning.update_planning_pb(planning_pb)
planning.compute_st_data()
def add_listener():
planning_sub = cyber.Node("st_plot")
planning_sub.create_reader('/apollo/planning', planning_pb2.ADCTrajectory,
planning_callback)
def press_key():
pass
if __name__ == '__main__':
cyber.init()
add_listener()
fig = plt.figure(figsize=(14, 6))
fig.canvas.mpl_connect('key_press_event', press_key)
ax = plt.subplot2grid((1, 2), (0, 0))
st_main_subplot = StMainSubplot(ax, 'QpSplineStSpeedOptimizer')
ax2 = plt.subplot2grid((1, 2), (0, 1))
st_speed_subplot = StSpeedSubplot(ax2, "QpSplineStSpeedOptimizer")
ani = animation.FuncAnimation(fig, update, interval=100)
plt.show()
cyber.shutdown() | apache-2.0 |
henrykironde/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 142 | 6276 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples(np.arange(1000) * 100)
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/io/test_feather.py | 7 | 4975 | """ test feather-format compat """
import pytest
feather = pytest.importorskip('feather')
import numpy as np
import pandas as pd
from pandas.io.feather_format import to_feather, read_feather
from feather import FeatherError
from pandas.util.testing import assert_frame_equal, ensure_clean
import pandas.util.testing as tm
from distutils.version import LooseVersion
fv = LooseVersion(feather.__version__)
@pytest.mark.single
class TestFeather(object):
def check_error_on_write(self, df, exc):
# check that we are raising the exception
# on writing
with pytest.raises(exc):
with ensure_clean() as path:
to_feather(df, path)
def check_round_trip(self, df, **kwargs):
with ensure_clean() as path:
to_feather(df, path)
result = read_feather(path, **kwargs)
assert_frame_equal(result, df)
def test_error(self):
for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'),
np.array([1, 2, 3])]:
self.check_error_on_write(obj, ValueError)
def test_basic(self):
df = pd.DataFrame({'string': list('abc'),
'int': list(range(1, 4)),
'uint': np.arange(3, 6).astype('u1'),
'float': np.arange(4.0, 7.0, dtype='float64'),
'float_with_null': [1., np.nan, 3],
'bool': [True, False, True],
'bool_with_null': [True, np.nan, False],
'cat': pd.Categorical(list('abc')),
'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
self.check_round_trip(df)
@pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
def test_strided_data_issues(self):
# strided data issue: https://github.com/wesm/feather/issues/97
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc'))
self.check_error_on_write(df, FeatherError)
def test_duplicate_columns(self):
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
self.check_error_on_write(df, ValueError)
def test_stringify_columns(self):
df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy()
self.check_error_on_write(df, ValueError)
@pytest.mark.skipif(fv >= '0.4.0', reason='fixed in 0.4.0')
def test_unsupported(self):
# timedelta
df = pd.DataFrame({'a': pd.timedelta_range('1 day', periods=3)})
self.check_error_on_write(df, FeatherError)
# non-strings
df = pd.DataFrame({'a': ['a', 1, 2.0]})
self.check_error_on_write(df, ValueError)
def test_unsupported_other(self):
# period
df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)})
self.check_error_on_write(df, ValueError)
@pytest.mark.skipif(fv < '0.4.0', reason='new in 0.4.0')
def test_rw_nthreads(self):
df = pd.DataFrame({'A': np.arange(100000)})
self.check_round_trip(df, nthreads=2)
def test_write_with_index(self):
df = pd.DataFrame({'A': [1, 2, 3]})
self.check_round_trip(df)
# non-default index
for index in [[2, 3, 4],
pd.date_range('20130101', periods=3),
list('abc'),
[1, 3, 4],
pd.MultiIndex.from_tuples([('a', 1), ('a', 2),
('b', 1)]),
]:
df.index = index
self.check_error_on_write(df, ValueError)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = 'foo'
self.check_error_on_write(df, ValueError)
# column multi-index
df.index = [0, 1, 2]
df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]),
self.check_error_on_write(df, ValueError)
def test_path_pathlib(self):
df = tm.makeDataFrame().reset_index()
result = tm.round_trip_pathlib(df.to_feather, pd.read_feather)
tm.assert_frame_equal(df, result)
def test_path_localpath(self):
df = tm.makeDataFrame().reset_index()
result = tm.round_trip_localpath(df.to_feather, pd.read_feather)
tm.assert_frame_equal(df, result)
| apache-2.0 |
hbenniou/trunk | doc/sphinx/conf.py | 1 | 27761 | # -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleand up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have either 'latex' or 'html' on the command line (hack for reference styles)")
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
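# Illustrative usage of the role in reST source (the target names here are
# hypothetical, chosen only to show the two supported forms):
# :yref:`Sphere` -> plain link whose text equals the target
# :yref:`sphere shape<Sphere>` -> explicit link text with the target in <>
# mkYrefNode below turns the target into the actual URI.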
def yadesrc_role(role,rawtext,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute tergets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
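# Sketch of the URI mapping implemented above for the HTML writer (the concrete
# target names are illustrative assumptions, not taken from the Yade sources):
# 'yade._packSpheres.SpherePack' -> 'yade.pack.html#yade._packSpheres.SpherePack'
# (module remapped via moduleMap, link text rendered as 'pack.SpherePack')
# 'external:numpy' -> 'external.html#module-numpy'
# 'Sphere.radius' -> 'yade.wrapper.html#yade.wrapper.Sphere.radius'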
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#avoid a crash on non-iterable __doc__ in some Qt objects
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
# inspect docstring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
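# Example of the boost::python docstring format this function is meant to parse
# (the exact docstring text is an assumption, shown only for illustration):
# obj.__doc__ == "\nfoo( (object)arg1, (int)n) -> float :\n    does something\n"
# boostFuncSignature('X.foo', obj) -> roughly ('( (object)arg1, (int)n) -> float ', [...])
# boostFuncSignature('X.foo', obj, removeSelf=True) -> roughly ('( (int)n) -> float ', [...])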
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
else: ret+=' (=uninitialized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
print 100*'@'+' Static attribute %s not matched'%attr
attrnode+=addnodes.desc_name(attr,attr)
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
elif 13<=yade.runtime.ipython_version<200:
import ipython_directive013 as id
else:
import ipython_directive200 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={Smilauer, V., et al.}}
\hypersetup{pdfkeywords={Discrete element method, granular materials, contact law, hydromechanical coupling}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large Freelance consultant (http://woodem.eu)}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}\\
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem.org/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| gpl-2.0 |
zhuango/python | machine-learning-algorithms/MDN/sinInverse.py | 2 | 2010 | #!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import math
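# This script first fits a small tanh network to noisy samples of
# y = 7*sin(0.75*x) + 0.5*x + noise, which an ordinary regressor handles well.
# It then swaps x and y and refits the same network: the inverse relation is
# multi-valued, so a least-squares fit averages over the branches; this failure
# is the usual motivation for the mixture density network (MDN) examples in this
# directory.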
NSAMPLE = 1000
x_data = np.float32(np.random.uniform(-10.5, 10.5, (1, NSAMPLE))).T
r_data = np.float32(np.random.normal(size=(NSAMPLE,1)))
y_data = np.float32(np.sin(0.75*x_data)*7.0+x_data*0.5+r_data*1.0)
plt.figure(figsize=(8, 8))
plot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)
plt.show()
x = tf.placeholder(dtype=tf.float32, shape=[None,1])
y = tf.placeholder(dtype=tf.float32, shape=[None,1])
NHIDDEN = 20
W = tf.Variable(tf.random_normal([1,NHIDDEN], stddev=1.0, dtype=tf.float32))
b = tf.Variable(tf.random_normal([1,NHIDDEN], stddev=1.0, dtype=tf.float32))
W_out = tf.Variable(tf.random_normal([NHIDDEN,1], stddev=1.0, dtype=tf.float32))
b_out = tf.Variable(tf.random_normal([1,1], stddev=1.0, dtype=tf.float32))
hidden_layer = tf.nn.tanh(tf.matmul(x, W) + b)
y_out = tf.matmul(hidden_layer,W_out) + b_out
lossfunc = tf.nn.l2_loss(y_out-y)
train_op = tf.train.RMSPropOptimizer(learning_rate=0.1, decay=0.8).minimize(lossfunc)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
NEPOCH = 1000
for i in range(NEPOCH):
sess.run(train_op,feed_dict={x: x_data, y: y_data})
x_test = np.float32(np.arange(-10.5,10.5,0.1))
x_test = x_test.reshape(x_test.size,1)
y_test = sess.run(y_out,feed_dict={x: x_test})
plt.figure(figsize=(8, 8))
plt.plot(x_data,y_data,'ro', x_test,y_test,'bo',alpha=0.3)
plt.show()
sess.close()
temp_data = x_data
x_data = y_data
y_data = temp_data
plt.figure(figsize=(8, 8))
plot_out = plt.plot(x_data,y_data,'ro',alpha=0.3)
plt.show()
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for i in range(NEPOCH):
sess.run(train_op,feed_dict={x: x_data, y: y_data})
x_test = np.float32(np.arange(-10.5,10.5,0.1))
x_test = x_test.reshape(x_test.size,1)
y_test = sess.run(y_out,feed_dict={x: x_test})
plt.figure(figsize=(8, 8))
plt.plot(x_data,y_data,'ro', x_test,y_test,'bo',alpha=0.3)
plt.show()
sess.close() | gpl-2.0 |
fyffyt/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
tensorflow/workshops | tfx_airflow/notebooks/utils.py | 1 | 20660 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils to query ml-metadata store in a notebook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from IPython.display import display_html
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from ml_metadata.proto import metadata_store_pb2
def _is_output_event(event):
"""Checks if event is an Output event."""
return event.type == metadata_store_pb2.Event.OUTPUT
def _is_input_event(event):
"""Checks if event is an Input event."""
return event.type in [metadata_store_pb2.Event.DECLARED_INPUT,
metadata_store_pb2.Event.INPUT]
def _get_value_str(p):
"""Returns a string representation of a `metadata_store_pb2.Value` object."""
if p.int_value:
return str(p.int_value)
if p.string_value:
return p.string_value
if p.double_value:
return str(p.double_value)
return ''
class _LineageGraphHelper(object):
"""A helper class to compute and plot lineage of ml-metadata artifacts."""
def __init__(self, store):
"""Initializes the _LineageGraphBuilder with given metadata store.
Args:
store: An instance of `metadata_store.MetadataStore`.
"""
self.metadata_store = store
def _get_upstream_execution_ids(self, artifact_id):
"""Returns a tuple of most recent execution id and whether it is cached run.
Args:
artifact_id: The artifact used to retrieve the upstream executions.
"""
events = self.metadata_store.get_events_by_artifact_ids([artifact_id])
    # Keep only the most recent producing execution; extra producers indicate cached runs.
execution_ids = [e.execution_id for e in events if _is_output_event(e)]
num_parents = len(execution_ids)
return [] if num_parents < 1 else [(max(execution_ids), num_parents > 1)]
def _get_upstream_artifact_ids(self, execution_id):
"""Returns a list of artifact_ids that were inputs for `execution_id`."""
events = self.metadata_store.get_events_by_execution_ids([execution_id])
return [e.artifact_id for e in events if _is_input_event(e)]
def _add_node_attribute(self, g, node_id, depth, is_artifact):
"""Adds the attributes of given artifact or execution to the graph `g`."""
# if it is not an artifact, use negative gnode id
gnode_id = node_id if is_artifact else -1 * node_id
g.add_node(gnode_id, depth=depth, is_artifact=is_artifact)
node_label = ''
if is_artifact:
[a] = self.metadata_store.get_artifacts_by_id([node_id])
[t] = self.metadata_store.get_artifact_types_by_id([a.type_id])
node_label += t.name
else:
[e] = self.metadata_store.get_executions_by_id([node_id])
[t] = self.metadata_store.get_execution_types_by_id([e.type_id])
node_label += t.name
g.nodes[gnode_id]['_label_'] = node_label
def _add_parents(self, g, node_id, is_artifact, depth, max_depth=None):
"""Adds the parent artifacts/executions for `node_id` to the graph `g`."""
# if it is not an artifact, use negative gnode id
gnode_id = node_id if is_artifact else -1 * node_id
self._add_node_attribute(g, node_id, depth, is_artifact)
if gnode_id in g and g.in_edges(gnode_id):
return
if max_depth is not None and depth > max_depth:
return
if is_artifact:
for (e_id, is_cached) in self._get_upstream_execution_ids(node_id):
g.add_edge(e_id * -1, node_id, is_cached=is_cached)
self._add_parents(g, e_id, not is_artifact, depth + 1, max_depth)
else:
for a_id in self._get_upstream_artifact_ids(node_id):
g.add_edge(a_id, node_id * -1, is_cached=False)
self._add_parents(g, a_id, not is_artifact, depth + 1, max_depth)
def get_artifact_lineage(self, artifact_id, max_depth=None):
"""Returns a `nx.DiGraph` representing the lineage of given `artifact_id`.
Args:
artifact_id: An `int` indicating the id of an Artifact.
max_depth: (Optional): An `int` indicating how far back the lineage
should be computed for `artifact_id`. By default the entire lineage
is computed.
Returns:
A `nx.DiGraph` for the lineage of given `artifact_id`.
Nodes with positive ids indicate an Artifact.
Nodes with negative ids indicate an Execution.
"""
g = nx.DiGraph(query_artifact_id=artifact_id)
if max_depth is None or max_depth > 0:
self._add_parents(g, artifact_id, True, 1, max_depth)
return g
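  # A minimal usage sketch (the store setup is hypothetical, not part of this
  # module):
  #   from ml_metadata.metadata_store import metadata_store
  #   store = metadata_store.MetadataStore(connection_config)
  #   helper = _LineageGraphHelper(store)
  #   lineage = helper.get_artifact_lineage(artifact_id=3, max_depth=2)
  #   print(lineage.number_of_nodes(), lineage.number_of_edges())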
def plot_artifact_lineage(self, g):
"""Plots a `nx.DiGraph` object.
This method uses networkx and matplotlib to plot the graph.
The nodes are places from left to right w.r.t. its depth.
Nodes at the same depths are placed vertically.
Artifact is shown in green, and Execution is shown in red.
Nodes are positioned in a bipartite graph layout.
Args:
g: A `nx.DiGraph` object.
"""
# make a copy of the graph; add auxiliary nodes
dag = g.copy(as_view=False)
label_anchor_id = 10000
for node_id in g.nodes:
if node_id > 0:
dag.add_node(label_anchor_id + node_id)
else:
dag.add_node(node_id - label_anchor_id)
# assign node color and label
node_color = ''
node_labels = {}
for node_id in dag.nodes:
if node_id > 0 and node_id < label_anchor_id:
node_color += 'c'
node_labels[node_id] = abs(node_id)
elif node_id > 0 and node_id >= label_anchor_id:
# artifact label
node_color += 'w'
type_name = dag.nodes[node_id - label_anchor_id]['_label_']
type_segments = re.split('([A-Z][a-z]+)', type_name)
node_txt = ('\n').join([s for s in type_segments if s])
node_labels[node_id] = node_txt
elif node_id < 0 and node_id > -1 * label_anchor_id:
node_color += 'm'
node_labels[node_id] = abs(node_id)
else:
# execution label
node_color += 'w'
type_name = dag.nodes[node_id + label_anchor_id]['_label_']
node_txt = type_name.split('.')[-1]
node_labels[node_id] = node_txt
pos = {}
a_nodes = []
e_nodes = []
for node_id in dag.nodes:
if node_id > 0 and node_id < label_anchor_id:
a_nodes.append(node_id)
elif node_id < 0 and node_id > -1 * label_anchor_id:
e_nodes.append(node_id)
# assign edge color
edge_color = []
for (_, _, labels) in dag.edges(data=True):
edge_color.append('y' if labels['is_cached'] else 'k')
a_nodes.sort(key=abs)
e_nodes.sort(key=abs)
a_node_y = 0
e_node_y = 0.035
a_offset = -0.5 if len(a_nodes) % 2 == 0 else 0
e_offset = -0.5 if len(e_nodes) % 2 == 0 else 0
a_node_x_min = -1 * len(a_nodes)/2 + a_offset
e_node_x_min = -1 * len(e_nodes)/2 + e_offset
a_node_x = a_node_x_min
e_node_x = e_node_x_min
node_step = 1
for a_id in a_nodes:
pos[a_id] = [a_node_x, a_node_y]
pos[a_id + label_anchor_id] = [a_node_x, a_node_y - 0.01]
a_node_x += node_step
for e_id in e_nodes:
pos[e_id] = [e_node_x, e_node_y]
pos[e_id - label_anchor_id] = [e_node_x, e_node_y + 0.01]
e_node_x += node_step
nx.draw(dag, pos=pos,
node_size=500, node_color=node_color,
labels=node_labels, node_shape='o', font_size=8.3, label='abc',
width=0.5, edge_color=edge_color)
a_bbox_props = dict(boxstyle='square,pad=0.3', fc='c', ec='b', lw=0)
plt.annotate(' Artifacts ',
xycoords='axes fraction', xy=(0.85, 0.575),
textcoords='axes fraction', xytext=(0.85, 0.575),
bbox=a_bbox_props, alpha=0.6)
e_bbox_props = dict(boxstyle='square,pad=0.3', fc='m', ec='b', lw=0)
plt.annotate('Executions',
xycoords='axes fraction', xy=(0.85, 0.5),
textcoords='axes fraction', xytext=(0.85, 0.5),
bbox=e_bbox_props, alpha=0.6)
plt.annotate(' Cached ',
xycoords='axes fraction', xy=(0.85, 0.425),
textcoords='axes fraction', xytext=(0.85, 0.425),
alpha=0.6)
plt.annotate('', xycoords='axes fraction', xy=(0.975, 0.405),
textcoords='axes fraction', xytext=(0.845, 0.405),
arrowprops=dict(edgecolor='y', arrowstyle='->', alpha=0.6))
    x_lim_left = max(-2 - 1.5/len(a_nodes),
                     min(a_node_x_min, e_node_x_min) - 1.0)
    x_lim_right = max(a_node_x, e_node_x) + 0.1
plt.xlim(x_lim_left, x_lim_right)
plt.show()
class ReadonlyMetadataStore(object):
"""An ml-metadata store that provides read-only methods for notebooks."""
def __init__(self, store):
"""Initializes a ReadonlyMetadataStore with given store.
Args:
store: An instance of `metadata_store.MetadataStore`.
"""
self.metadata_store = store
self._lineage_graph_helper = _LineageGraphHelper(store)
def get_df_from_single_artifact_or_execution(self, obj):
"""Returns a `pd.DataFrame` based on an artifact/execution properties.
Args:
obj: An instance of `metadata_store_pb2.Artifact` or
`metadata_store_pb2.Execution`.
Returns:
A `pd.DataFrame` to display the properties of an artifact/execution.
"""
data = {}
if isinstance(obj, metadata_store_pb2.Artifact):
data['URI'] = obj.uri
for p in obj.properties:
data[p.upper()] = _get_value_str(obj.properties[p])
for p in obj.custom_properties:
data[p.upper()] = _get_value_str(obj.custom_properties[p])
return pd.DataFrame.from_dict(
data=data, orient='index', columns=['']).fillna('-')
def get_df_from_artifacts_or_executions(self, objects):
"""Returns a `pd.DataFrame` of given artifacts'/executions' properties."""
data = {}
for obj in objects:
col_map = {}
if isinstance(obj, metadata_store_pb2.Artifact):
col_map['URI'] = obj.uri
for p in obj.properties:
col_map[p.upper()] = _get_value_str(obj.properties[p])
for p in obj.custom_properties:
col_map[p.upper()] = _get_value_str(obj.custom_properties[p])
data[obj.id] = col_map
df = pd.DataFrame.from_dict(data=data, orient='index').fillna('-')
df.index.name = 'ID'
return df
def get_artifact_df(self, artifact_id):
"""Returns a `pd.DataFrame` for an artifact with `artifact_id`.
Args:
artifact_id: An `int` indicating the id of an artifact in the store.
Returns:
A `pd.DataFrame` to display the properties of the artifact corresponding
to `artifact_id` or None if no such artifact exists in the store.
"""
artifacts = self.metadata_store.get_artifacts_by_id([artifact_id])
return (
self.get_df_from_single_artifact_or_execution(artifacts[0])
if artifacts else None
)
def get_execution_df(self, execution_id):
"""Returns a `pd.DataFrame` for an execution with `execution_id`.
Args:
execution_id: An `int` indicating the id of an execution in the store.
Returns:
A `pd.DataFrame` to display the properties of the execution corresponding
to `execution_id` or None if no such execution exists in the store.
"""
executions = self.metadata_store.get_executions_by_id([execution_id])
return (
self.get_df_from_single_artifact_or_execution(executions[0])
if executions else None
)
def get_artifacts_of_type_df(self, type_name):
"""Returns a `pd.DataFrame` for all artifacts of given `type_name`.
Args:
type_name: A `str` indicating the name of an artifact type in the store.
Returns:
A `pd.DataFrame` to display the properties of all artifacts with given
type in the store.
"""
return self.get_df_from_artifacts_or_executions(
self.metadata_store.get_artifacts_by_type(type_name))
def get_executions_of_type_df(self, type_name):
"""Returns a `pd.DataFrame` for all executions of given `type_name`.
Args:
type_name: A `str` indicating the name of an execution type in the store.
Returns:
A `pd.DataFrame` to display the properties of all executions with given
type in the store.
"""
return self.get_df_from_artifacts_or_executions(
self.metadata_store.get_executions_by_type(type_name))
def get_source_artifact_of_type(self, artifact_id, source_type_name):
"""Returns the source artifact of `source_type_name` for `artifact_id`.
This method recursively traverses the events and associated executions that
led to generating `artifact_id` to find an artifact of type
`source_type_name` that was an input for these events.
Args:
artifact_id: A `int` indicating the id of an artifact.
source_type_name: A `str` indicating the type of an artifact that is
a direct or indirect input for generating `artifact_id`.
Returns:
A `metadata_store_pb2.Artifact` of type `source_type_name` that is a
direct/indirect input for generating `artifact_id` or `None` if no such
artifact exists.
"""
a_events = self.metadata_store.get_events_by_artifact_ids([artifact_id])
for a_event in a_events:
if _is_input_event(a_event):
continue
[execution] = self.metadata_store.get_executions_by_id(
[a_event.execution_id])
e_events = self.metadata_store.get_events_by_execution_ids([execution.id])
for e_event in e_events:
if _is_output_event(e_event):
continue
[artifact] = self.metadata_store.get_artifacts_by_id(
[e_event.artifact_id])
[artifact_type] = self.metadata_store.get_artifact_types_by_id(
[artifact.type_id])
if artifact_type.name == source_type_name:
return artifact
input_artifact = self.get_source_artifact_of_type(
artifact.id, source_type_name)
if input_artifact:
return input_artifact
def get_dest_artifact_of_type(self, artifact_id, dest_type_name):
"""Returns the destination artifact of `dest_type_name` from `artifact_id`.
This method recursively traverses the events and associated executions that
consumed `artifact_id` to find an artifact of type `dest_type_name` that was
an output for these events.
Args:
artifact_id: A `int` indicating the id of an artifact.
dest_type_name: A `str` indicating the type of an artifact that is
a output of an event that directly/indirectly consumed `artifact_id`.
Returns:
A `metadata_store_pb2.Artifact` of type `dest_type_name` that is a
direct/indirect output from `artifact_id` or `None` if no such artifact
exists.
"""
a_events = self.metadata_store.get_events_by_artifact_ids([artifact_id])
for a_event in a_events:
if _is_output_event(a_event):
continue
[execution] = self.metadata_store.get_executions_by_id(
[a_event.execution_id])
e_events = self.metadata_store.get_events_by_execution_ids(
[execution.id])
for e_event in e_events:
if _is_input_event(e_event):
continue
[artifact] = self.metadata_store.get_artifacts_by_id(
[e_event.artifact_id])
[artifact_type] = self.metadata_store.get_artifact_types_by_id(
[artifact.type_id])
if artifact_type.name == dest_type_name:
return artifact
dest_artifact = self.get_dest_artifact_of_type(
artifact.id, dest_type_name)
if dest_artifact:
return dest_artifact
def get_execution_for_output_artifact(self, artifact_id, type_name):
"""Returns the execution of `type_name` that generated `artifact_id`.
Args:
artifact_id: A `int` indicating the id of an artifact.
type_name: A `str` indicating the type of an Execution that generated
`artifact_id`.
Returns:
A `metadata_store_pb2.Execution` of type `type_name` that generated
`artifact_id` or `None` if no such execution exists.
"""
a_events = self.metadata_store.get_events_by_artifact_ids([artifact_id])
for a_event in a_events:
if _is_input_event(a_event):
continue
[execution] = self.metadata_store.get_executions_by_id(
[a_event.execution_id])
[execution_type] = self.metadata_store.get_execution_types_by_id(
[execution.type_id])
if execution_type.name == type_name:
return execution
def display_artifact_and_execution_properties(self, artifact_id,
execution_type_name):
"""Displays properties of artifact and the execution that generated it.
Args:
artifact_id: A `int` indicating the id of an artifact.
execution_type_name: A `str` indicating the type of an execution that
generated `artifact_id`.
"""
execution = self.get_execution_for_output_artifact(
artifact_id, execution_type_name)
if not execution:
return
execution_id = execution.id
# Get data frames to visualize the artifact and execution properties.
artifact_df, execution_df = (
self.get_artifact_df(artifact_id), self.get_execution_df(execution_id)
)
# Style the data frames to set captions.
artifact_df_styler = artifact_df.style.set_caption(
'Properties for Artifact {}'.format(artifact_id))
execution_df_styler = execution_df.style.set_caption(
'Properties for Execution {} that generated Artifact {}'.format(
execution_id, artifact_id))
# Display the HTML.
# pylint: disable=protected-access
display_html(
artifact_df_styler._repr_html_() + execution_df_styler._repr_html_(),
raw=True)
# pylint: enable=protected-access
def compare_artifact_pair_and_execution_properties(
self, artifact_id, other_artifact_id, execution_type_name):
"""Displays properties of 2 artifacts and executions that generated them.
Args:
artifact_id: A `int` indicating the id of one artifact.
other_artifact_id: A `int` indicating the id of another artifact.
execution_type_name: A `str` indicating the type of executions that
generated `artifact_id` and `other_artifact_id`.
"""
# Get data frame to visualize properties of the 2 artifacts.
df = self.get_df_from_artifacts_or_executions(
self.metadata_store.get_artifacts_by_id(
[artifact_id, other_artifact_id]))
artifacts_df_styler = df.style.set_caption(
'Properties for Artifacts {}, {}'.format(
artifact_id, other_artifact_id))
# Compare properties of the executions that generated these artifacts.
execution = self.get_execution_for_output_artifact(
artifact_id, execution_type_name)
other_execution = self.get_execution_for_output_artifact(
other_artifact_id, execution_type_name)
if not execution or not other_execution:
return
executions_df = self.get_df_from_artifacts_or_executions([
execution, other_execution])
executions_df_styler = executions_df.style.set_caption(
'Properties for Executions that generated Artifacts {}, {}'.format(
artifact_id, other_artifact_id))
# Display the HTML.
# pylint: disable=protected-access
display_html(
artifacts_df_styler._repr_html_() + executions_df_styler._repr_html_(),
raw=True)
# pylint: enable=protected-access
def plot_artifact_lineage(self, artifact_id, max_depth=None):
"""Computes and plots the lineage graph for `artifact_id` upto `max_depth`.
Args:
artifact_id: An `int` indicating the id of an Artifact.
max_depth: (Optional): An `int` indicating how far back the lineage
should be computed for `artifact_id`. By default the entire lineage
is computed.
"""
self._lineage_graph_helper.plot_artifact_lineage(
self._lineage_graph_helper.get_artifact_lineage(
artifact_id, max_depth=max_depth)) | apache-2.0 |
Averroes/statsmodels | statsmodels/examples/run_all.py | 34 | 1984 | '''run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
Uncomment plt.show() to show all plot windows.
'''
from __future__ import print_function
from statsmodels.compat.python import lzip, input
import matplotlib.pyplot as plt #matplotlib is required for many examples
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', #'example_rpy.py',
'example_ols.py', 'example_ols_minimal.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
'tut_ols.py', 'tut_ols_rlm.py', 'tut_ols_wls.py']
use_glob = True
if use_glob:
import glob
filelist = glob.glob('*.py')
print(lzip(range(len(filelist)), filelist))
for fname in ['run_all.py', 'example_rpy.py']:
filelist.remove(fname)
#filelist = filelist[15:]
#temporarily disable show
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
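# An alternative sketch (not used here): selecting a non-interactive backend
# up front avoids monkey-patching show(), e.g.
#   import matplotlib
#   matplotlib.use('Agg')
#   import matplotlib.pyplot as plt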
cont = input("""Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """)
has_errors = []
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print("\n\nExecuting example file", run_all_f)
print("-----------------------" + "-"*len(run_all_f))
exec(open(run_all_f).read())
except:
#f might be overwritten in the executed file
print("**********************" + "*"*len(run_all_f))
print("ERROR in example file", run_all_f)
print("**********************" + "*"*len(run_all_f))
has_errors.append(run_all_f)
if stop_on_error:
raise
print('\nModules that raised an exception:')
print(has_errors)
#reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
| bsd-3-clause |
mne-tools/mne-python | tutorials/inverse/30_mne_dspm_loreta.py | 3 | 5666 | """
.. _tut-inverse-methods:
Source localization with MNE/dSPM/sLORETA/eLORETA
=================================================
The aim of this tutorial is to teach you how to compute and apply a linear
minimum-norm inverse method on evoked/raw/epochs data.
"""
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
###############################################################################
# Process MEG data
data_path = sample.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(raw_fname) # already has an average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
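# The reject thresholds are peak-to-peak amplitudes in SI units (gradiometers
# in T/m, magnetometers in T, EOG in V); epochs exceeding any of them are
# dropped.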
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('meg', 'eog'), baseline=baseline, reject=reject)
###############################################################################
# Compute regularized noise covariance
# ------------------------------------
# For more details see :ref:`tut-compute-covariance`.
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
###############################################################################
# Compute the evoked response
# ---------------------------
# Let's just use the MEG channels for simplicity.
evoked = epochs.average().pick('meg')
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',
time_unit='s')
###############################################################################
# It's also a good idea to look at whitened data:
evoked.plot_white(noise_cov, time_unit='s')
del epochs, raw # to save memory
###############################################################################
# Inverse modeling: MNE/dSPM on evoked and raw data
# -------------------------------------------------
# Here we first read the forward solution. You will likely need to compute
# one for your own data -- see :ref:`tut-forward` for information on how
# to do it.
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
###############################################################################
# Next, we make an MEG inverse operator.
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
del fwd
# You can write it to disk with::
#
# >>> from mne.minimum_norm import write_inverse_operator
# >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
# inverse_operator)
###############################################################################
# Compute inverse solution
# ------------------------
# We can use this to compute the inverse solution and obtain source time
# courses:
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
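# lambda2 sets the regularization of the inverse estimate; assuming an SNR of
# 3 (hence lambda2 = 1/9) is the usual rule of thumb for evoked (averaged)
# data.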
stc, residual = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None,
return_residual=True, verbose=True)
###############################################################################
# Visualization
# -------------
# We can look at different dipole activations:
fig, ax = plt.subplots()
ax.plot(1e3 * stc.times, stc.data[::100, :].T)
ax.set(xlabel='time (ms)', ylabel='%s value' % method)
###############################################################################
# Examine the original data and the residual after fitting:
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
ax.texts = []
for line in ax.lines:
line.set_color('#98df81')
residual.plot(axes=axes)
###############################################################################
# Here we use peak getter to move visualization to the time point of the peak
# and draw a marker at the maximum peak vertex.
# sphinx_gallery_thumbnail_number = 9
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
surfer_kwargs = dict(
hemi='rh', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=10)
brain = stc.plot(**surfer_kwargs)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6, alpha=0.5)
brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',
font_size=14)
# The documentation website's movie is generated with:
# brain.save_movie(..., tmin=0.05, tmax=0.15, interpolation='linear',
# time_dilation=20, framerate=10, time_viewer=True)
###############################################################################
# There are many other ways to visualize and work with source data, see
# for example:
#
# - :ref:`tut-viz-stcs`
# - :ref:`ex-morph-surface`
# - :ref:`ex-morph-volume`
# - :ref:`ex-vector-mne-solution`
# - :ref:`tut-dipole-orientations`
# - :ref:`tut-mne-fixed-free`
# - :ref:`examples using apply_inverse
# <sphx_glr_backreferences_mne.minimum_norm.apply_inverse>`.
| bsd-3-clause |
emmdim/guifiAnalyzer | vis/ipnetworksDB.py | 1 | 12438 | #!/usr/bin/env python
#
# ipnetworksDB.py
from guifiAnalyzer.db.infrastructure import InfraDB
from guifiAnalyzer.proxies import getIPNetworks
import networkx
from networkx.readwrite import json_graph
# Community detection
# https://bitbucket.org/taynaud/python-louvain/
from guifiAnalyzer.lib.pythonLouvain import community
import matplotlib.pyplot as plt
from collections import Counter
import pandas
import functools
import os
import json
import http_server
import random
import math
import pdb
def node2Gnode(graph, digraph, infra, nodes_networks, router_ips_per_node, node):
node_id = node['_id']
node_type = "client"
graphServer = "NA"
devices = list(node['devices'])
radios = []
for d in devices:
radios.extend(d['radios'])
    # Networks in which the node participates
if node_id in nodes_networks:
networks = nodes_networks[node_id]
else:
networks = []
links = infra.parseNodeLinks(node)
# Add to graph only nodes with links
if len(links) == 0:
return
# decide supernodes
if len(radios) > 1:
node_type = 'supernode'
# check for router_ips
isrouter = 'N'
router = ''
if node_id in router_ips_per_node:
# is router
isrouter = router_ips_per_node[node_id]
router = 'N'
else:
# Find corresponding router
if node_id in nodes_networks:
router = []
for net in networks:
spl = net.split('.')
router_ip = '.'.join([spl[0],spl[1],spl[2],str(int(spl[3])+1)])
for n,ips in router_ips_per_node.iteritems():
if router_ip in ips:
router.append(n)
router = list(set(router))
services = node['services']
proxy = 0
graphServer = 0
for s in services:
if s['type'] == 'Proxy':
proxy = 1
if s['type'] == 'SNPgraphs':
graphServer = 1
digraph.add_node(node_id,
{'name': node_id,
'status': node['status'],
'type': node_type,
'isproxy' : proxy,
'isrouter' : isrouter,
'router' : router,
'graphServer' : graphServer,
'zone' : node['parentZone'],
'networks' : networks})
graph.add_node(node_id,
{'name': node_id,
'status': node['status'],
'type': node_type,
'isproxy': proxy,
'isrouter' : isrouter,
'router' : router,
'graphServer' : graphServer,
'zone' : node['parentZone'],
'networks' : networks})
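# Note: the router lookup in node2Gnode assumes the gateway of each client
# network is the network address plus one (e.g. 10.1.2.0 -> 10.1.2.1);
# networks whose gateway sits elsewhere will not be matched.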
def link2Gedge(graph, digraph, link):
if link['nodeA'] and link['nodeB']:
if link['type'] == 'wds':
digraph.add_edge(
link['nodeA'], link['nodeB'],
{'id': link['_id']+'A',
'type': link['type']
#'direction': 'a',
#'group': link['status'],
})
digraph.add_edge(
link['nodeB'], link['nodeA'],
{'id': link['_id']+'B',
'type': link['type']
#'direction': 'b',
#'group': link['status'],
})
graph.add_edge(
link['nodeA'], link['nodeB'],
{'id': link['_id'],
'type': link['type']
#'direction': 'b',
#'group': link['status'],
})
elif link['type'] == 'ap/client':
# draw the links using the client info
index = 'A'
opindex = 'B'
digraph.add_edge(
link['node'+index], link['node'+opindex],
{'id': link['_id']+' out',
'type': link['type']
#'group': link['status'],
})
digraph.add_edge(
link['nodeB'], link['nodeA'],
{'id': link['_id']+'in',
'type': link['type']
#'group': link['status'],
})
graph.add_edge(
link['node'+index], link['node'+opindex],
{'id': link['_id'],
'type': link['type']
#'group': link['status'],
})
def createGraph(root, core):
infraDB = InfraDB(root, core)
infraDB.connect()
nodes = list(infraDB.getNodes())
nodes1 = {d['_id']:d for d in nodes}
links = infraDB.getLinks()
links1 = {d['_id']:d for d in links}
    # Delete links where either endpoint node is missing from the db
wrong_links = [n for n,v in links1.iteritems() if v['nodeA'] not in nodes1 or v['nodeB'] not in nodes1]
for l in wrong_links:
del(links1[l])
ip_networks, router_ips_per_node = getIPNetworks.mapping(root,core)
graph = networkx.Graph()
digraph = networkx.DiGraph()
map(functools.partial(node2Gnode, graph, digraph, infraDB, ip_networks, router_ips_per_node), nodes)
map(functools.partial(link2Gedge, graph, digraph), links1.values())
return (graph, digraph)
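# Example usage (mirrors the calls at the bottom of this script):
#   graph, digraph = createGraph(8346, False)
#   print 'Nodes: %s, Links: %s' % (graph.order(), graph.size())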
#graph.add_node(1, {'id':'a','type':'supernode'}, color='red')
#graph.add_node(2, {'id':'b','color':'blue'}, type='node')
#graph.add_node(3, {'id':'c','type':'node'}, color='blue')
#graph.add_edge(1,2, {'id':'a','type':'wds'}, color='green')
#graph.add_edge(3,2, {'id':'b','type':'ap'}, color='yellow')
# graph.add_nodes_from([1,2,3,4,5,6])
# graph.add_edges_from([(1,2),(3,4),(5,6),(1,6),(4,5)])
# A=to_agraph(graph) # convert to a graphviz graph
# A.layout() # neato layout
# A.draw("k5.ps") # write postscript in k5.ps with neato layout
def graphStats(graph):
pathlengths = []
#print("source vertex {target:length, }")
for v in graph.nodes():
spl = networkx.single_source_shortest_path_length(graph, v)
#print('%s %s' % (v,spl))
for p in spl.values():
pathlengths.append(p)
print('')
print(
"average shortest path length %s" %
(sum(pathlengths) / len(pathlengths)))
# histogram of path lengths
dist = {}
for p in pathlengths:
if p in dist:
dist[p] += 1
else:
dist[p] = 1
print('')
# print("length #paths")
# verts=dist.keys()
# for d in sorted(verts):
# print('%s %d' % (d,dist[d]))
#print("radius: %d" % radius(graph))
#print("diameter: %d" % diameter(graph))
#print("eccentricity: %s" % eccentricity(graph))
#print("center: %s" % center(graph))
#print("periphery: %s" % periphery(graph))
#print("density: %s" % density(graph))
# draw_shell(graph,with_labels=True)
# plt.show()
# write_gexf(graph,"test.gexf")
#write_pajek(graph, "test.net")
def save(G, fname):
json.dump(dict(nodes=[[n, G.node[n]] for n in G.nodes()],
links=[[u, v, G.edge[u][v]] for u,v in G.edges()]),
open(fname, 'w'), indent=2)
def drawGraph(graph, is_connected=False):
print 'Nodes: %s' % graph.order()
print 'Links: %s' % graph.size()
connected_str = "_connected" if is_connected else ""
outputfile = os.path.join( os.getcwd(), 'guifiAnalyzerOut',
'd3', str(root)+corename+connected_str+'.json')
#outputgexf = os.path.join( os.getcwd(), 'guifiAnalyzerOut',
# 'results', str(root)+corename+connected_str+'.gexf')
#networkx.write_gexf(graph, outputgexf)
# For undirected
d = json_graph.node_link_data(graph)
json.dump(d, open(outputfile, 'w'))
# For directed
#save(graph,outputfile)
html = os.path.join( os.getcwd(), 'guifiAnalyzerOut',
'd3', 'test.html')
#http_server.load_url(html)
http_server.load_url('guifiAnalyzerOut/d3/networks.html')
def plot_log_degree(G):
degree_sequence=sorted(networkx.degree(G).values(),reverse=True) # degree sequence
#print "Degree sequence", degree_sequence
dmax=max(degree_sequence)
plt.loglog(degree_sequence,'b-',marker='o')
plt.title("Degree rank plot")
plt.ylabel("degree")
plt.xlabel("rank")
plt.show()
def plot_communities(G):
partition = community.best_partition(G)
#drawing
size = float(len(set(partition.values())))
pos = networkx.spring_layout(G)
count = 0.
for com in set(partition.values()) :
count = count + 1.
list_nodes = [nodes for nodes in partition.keys()
if partition[nodes] == com]
networkx.draw_networkx_nodes(G, pos, list_nodes, node_size = 20,
node_color = str(count / size))
networkx.draw_networkx_edges(G,pos, alpha=0.5)
plt.show()
def kcliques_to_html(G):
kcliques = list(networkx.k_clique_communities(G, 2))
#pdb.set_trace()
kcliques_colors = [random.randint(0,1000000)*len(l) for l in kcliques]
for clique in kcliques:
color = kcliques_colors[kcliques.index(clique)]
for node in clique:
G.node[node]['kclique'] = color
def components_to_html(G):
comps = list(networkx.connected_components(G))
comps_colors = [random.randint(0,1000000)*len(l) for l in comps]
for comp in comps:
color = comps_colors[comps.index(comp)]
for node in comp:
G.node[node]['component'] = color
def between_central_to_html(G):
bc = networkx.betweenness_centrality(G=G, normalized=True)
for node,value in bc.iteritems():
G.node[node]['bc'] = value*100
def getTrafficConnectedComponentGraph(G):
H = G.copy()
to_remove = []
for (s,d) in H.edges():
if H[s][d]['weight'] <= 2:
to_remove.extend([(s,d)])
H.remove_edges_from(to_remove)
#print list(networkx.connected_components(H))
print networkx.number_connected_components(H)
Gc = max(networkx.connected_component_subgraphs(H), key=len)
#drawGraph(Gc, connected=True)
return Gc
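# Note: this filter expects every edge to carry a 'weight' attribute (e.g. a
# traffic measure attached elsewhere); the graphs built by createGraph() above
# do not set one, which is presumably why the calls below sit behind
# "if False".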
def addOneHopNeighbours(graph,conn_graph):
new_graph = conn_graph.copy()
nodeslist = graph.nodes(data=True)
nodes = {n:d for (n, d) in nodeslist}
for node in conn_graph:
neigh = graph.neighbors(node)
ccneigh = conn_graph.neighbors(node)
extraneigh = [item for item in neigh if item not in ccneigh]
for neighbor in extraneigh:
nodedata = nodes[node]
new_graph.add_node(node, nodedata)
edgedata = graph.get_edge_data(node, neighbor)
new_graph.add_edge(node, neighbor, edgedata)
#pdb.set_trace()
return new_graph
def distanceClientRouter(graph):
nodeslist = graph.nodes(data=True)
nodes = {n:d for (n, d) in nodeslist}
clients = [n for n in graph if 'isrouter' in nodes[n] and nodes[n]['isrouter'] == 'N']
#routers = [n for n in graph if 'isrouter' in nodes[n] and nodes[n]['isrouter'] != 'N']
#distance_per_client = { n:networkx.shortest_path_length(graph,n,nodes[nodes[n]['router']]) for n in clients if nodes[n]['router'] != []}
distance_per_client = {}
for n in clients:
if nodes[n]['router'] != []:
router_id = nodes[n]['router'][0]
node = n
distance = networkx.shortest_path_length(graph,node,router_id)
distance_per_client[n] = distance
    #pdb.set_trace()
print 'Counters of Distances between client and router: %s' % Counter(distance_per_client.values())
root = 8346
#root = 18668
#root = 2444
#root = 2435
core = False
#core = True
corename = '_core' if core else ''
G, DiG = createGraph(root, core)
#plot_communities(G)
#between_central_to_html(G)
distanceClientRouter(G)
drawGraph(G)
if False:
# Connected components and neighbors
Gc = getTrafficConnectedComponentGraph(G)
for (s,d) in G.edges():
if Gc.has_edge(s,d):
G[s][d]['incc'] = 1
else:
G[s][d]['incc'] = 0
Gc1 = addOneHopNeighbours(G, Gc)
Gc2 = addOneHopNeighbours(G,Gc1)
#for (s,d) in Gc.edges():
# G[s][d]['incc'] = True
drawGraph(Gc2, True)
| gpl-3.0 |
sahat/bokeh | examples/app/applet/example.py | 3 | 7687 | import bokeh.server
from bokeh.plotting import line, circle, curdoc
from bokeh.widgetobjects import (VBoxModelForm, HBox,
BokehApplet, TextInput, PreText,
Select, Slider)
from bokeh.objects import Plot, ColumnDataSource
from bokeh.plot_object import PlotObject
from bokeh.properties import (Dict, Float, String, Instance)
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class MyModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
offset = Float(1.0)
scale = Float(1.0)
title = String(default="my sin wave")
input_specs = [
{"widget" : TextInput,
"name" : "title",
"value" : "my sin wave"},
{"widget" : Slider,
"name" : "offset",
"value" : 1.0,
"start" : 0.0,
"end" : 5.0},
{"widget" : Slider,
"name" : "scale",
"value" : 1.0,
"start" : -5.0,
"end" : 5.0},
]
class MyApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = MyModel()
self.modelform.create_inputs(doc)
self.source = ColumnDataSource(data={'x':[], 'y':[]})
self.update_data()
self.plot = line('x', 'y', source=self.source,
plot_width=400, plot_height=400,
title=self.modelform.title
)
self.children.append(self.modelform)
self.children.append(self.plot)
def input_change(self, obj, attrname, old, new):
"""
This function is called whenever the input form changes
This is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
self.update_data()
self.plot.title = self.modelform.title
def update_data(self):
N = 80
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
logging.debug ("PARAMS %s %s", self.modelform.offset, self.modelform.scale)
y = self.modelform.offset + y * self.modelform.scale
self.source.data = {'x' : x, 'y' : y}
# the following adds "/exampleapp" as a URL which renders MyApp
bokeh_url = "http://localhost:5006"
MyApp.add_route("/exampleapp", bokeh_url)
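# With the bokeh server running (see the __main__ block at the end of this
# file), the applet should then be reachable at
# http://localhost:5006/exampleapp, assuming the default host and port above.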
"""
Example 2
you need to run download.py to get the data from quantquote
"""
import os
from os.path import join, dirname, splitext
import pandas as pd
data_dir = join(dirname(__file__), "daily")
tickers = os.listdir(data_dir)
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
class StockInputModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
ticker1 = String(default="AAPL")
ticker2 = String(default="GOOG")
input_specs = [
{"widget" : Select,
"name" : "ticker1",
"value" : "AAPL",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
},
{"widget" : Select,
"name" : "ticker2",
"value" : "GOOG",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
}
]
class StockApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
pretext = Instance(PreText)
def get_data(self, ticker1, ticker2):
fname = join(data_dir, "table_%s.csv" % ticker1.lower())
data1 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data1 = data1.set_index('date')
fname = join(data_dir, "table_%s.csv" % ticker2.lower())
data2 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data2 = data2.set_index('date')
data = pd.DataFrame({ticker1 : data1.c, ticker2 : data2.c})
data[ticker1 + "_returns"] = data[ticker1].diff()
data[ticker2 + "_returns"] = data[ticker2].diff()
data = data.dropna()
return data
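    # Note: despite the "_returns" column names, .diff() yields absolute
    # day-over-day price changes, not percentage returns.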
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = StockInputModel()
self.modelform.create_inputs(doc)
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.pretext = PreText(text="")
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.make_stats()
self.set_children()
def make_source(self, ticker1, ticker2):
df = self.get_data(ticker1, ticker2)
self.source = ColumnDataSource(data=df)
def make_plots(self, ticker1, ticker2):
self.plot = circle(ticker1 + "_returns", ticker2 + "_returns",
title="%s vs %s" %(ticker1, ticker2),
source=self.source,
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,select"
)
def set_children(self):
self.children = [self.modelform, self.plot, self.pretext]
curdoc()._plotcontext.children = [self]
curdoc().add_all()
def input_change(self, obj, attrname, old, new):
"""
This function is called whenever the input form changes
This is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
if attrname in ("ticker1", "ticker2"):
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.set_children()
def setup_events(self):
super(StockApp, self).setup_events()
if self.source:
self.source.on_change('selected', self, 'selection_change')
def make_stats(self):
pandas_df = pd.DataFrame(self.source.data)
selected = self.source.selected
if selected:
pandas_df = pandas_df.iloc[selected, :]
stats = pandas_df.describe()
self.pretext.text = str(stats)
def selection_change(self, obj, attrname, old, new):
self.make_stats()
# the following adds "/stocks" as a URL which renders StockApp
bokeh_url = "http://localhost:5006"
StockApp.add_route("/stocks", bokeh_url)
if __name__ == "__main__":
bokeh.server.run()
| bsd-3-clause |
ligo-cbc/pycbc | test/test_tmpltbank.py | 9 | 25760 | # Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.tmpltbank module
"""
from __future__ import division
import os
import numpy
import pycbc.tmpltbank
import pycbc.psd
import pycbc.pnutils
from pycbc import pnutils
from pycbc.types import Array
from pycbc.filter import match
from pycbc.waveform import get_fd_waveform
from six.moves import range
import difflib
import sys
import matplotlib
matplotlib.use('Agg')
import pylab
import unittest
from utils import parse_args_cpu_only, simple_exit
# This will return whatever is appropriate, depending on whether this
# particular instance of the unittest was called for CPU, CUDA, or OpenCL
parse_args_cpu_only("Template bank module")
import argparse
parser = argparse.ArgumentParser()
def update_mass_parameters(tmpltbank_class):
"""
Choose various sets of mass parameters for testing.
"""
num_comp_masses = 3
min_mass1 = [1,2,6]
max_mass1 = [5,8,12]
min_mass2 = [1,1,1]
max_mass2 = [5,5,5]
num_tot_masses = 3
# These *must* be provided
min_tot_mass = [None, 2.5, 3.5]
max_tot_mass = [None, 11, 7.5]
num_chirp_masses = 3
max_chirp_mass = [None, 2.43, 3.5]
min_chirp_mass = [None, 1.218, 2.43]
num_etas = 3
max_eta = [0.25, 0.24, 0.23]
min_eta = [None, 0.16, 0.17]
max_iter_idx = num_comp_masses * num_tot_masses *\
num_chirp_masses * num_etas
for idx in range(max_iter_idx):
comp_masses_idx = idx % num_comp_masses
tmpltbank_class.min_mass1 = min_mass1[comp_masses_idx]
tmpltbank_class.max_mass1 = max_mass1[comp_masses_idx]
tmpltbank_class.min_mass2 = min_mass2[comp_masses_idx]
tmpltbank_class.max_mass2 = max_mass2[comp_masses_idx]
reduced_idx = idx // num_comp_masses
tot_mass_idx = reduced_idx % num_tot_masses
tmpltbank_class.min_total_mass = min_tot_mass[tot_mass_idx]
tmpltbank_class.max_total_mass = max_tot_mass[tot_mass_idx]
reduced_idx = reduced_idx // num_tot_masses
chirp_mass_idx = reduced_idx % num_chirp_masses
tmpltbank_class.min_chirp_mass = min_chirp_mass[chirp_mass_idx]
tmpltbank_class.max_chirp_mass = max_chirp_mass[chirp_mass_idx]
reduced_idx = reduced_idx // num_chirp_masses
eta_idx = reduced_idx
tmpltbank_class.max_eta = max_eta[eta_idx]
tmpltbank_class.min_eta = min_eta[eta_idx]
yield idx
return
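# The tests below iterate over this generator roughly as
#   for idx in update_mass_parameters(self):
#       ...  # the min/max mass attributes have been updated for this idx
# so each index exercises one combination of the limits defined above.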
class TmpltbankTestClass(unittest.TestCase):
def setUp(self):
# Where are my data files?
if os.path.isfile('test/data/ZERO_DET_high_P.txt'):
self.dataDir = 'test/data/'
elif os.path.isfile('data/ZERO_DET_high_P.txt'):
self.dataDir = 'data/'
else:
self.assertTrue(False, msg="Cannot find data files!")
self.deltaF = 0.1
self.f_low = 15
self.f_upper = 2000
self.f0 = 70
self.sampleRate = 4096
self.pnOrder = 'threePointFivePN'
self.min_mass1 = 1
self.min_mass2 = 1
self.max_mass1 = 5
self.max_mass2 = 5
self.max_ns_spin_mag = 0.5
self.max_bh_spin_mag = 0.9
self.ns_bh_boundary_mass = 2.0
self.min_total_mass = 2.5
self.max_total_mass = 6.0
self.max_chirp_mass = 2.4375415772291475
self.min_chirp_mass = 1.2187707886145738
self.max_eta = 0.24
self.min_eta = 0.16
# Sanity check these
pycbc.tmpltbank.verify_mass_range_options(self, parser=parser)
# Need to use F2 metric for ethinca
self.ethincaOrder = 'threePointFivePN'
self.ethincaCutoff = 'SchwarzISCO'
self.ethincaFreqStep = 10.
self.segLen = 1./self.deltaF
self.psdSize = int(self.segLen * self.sampleRate / 2.) + 1
self.psd = pycbc.psd.from_txt('%sZERO_DET_high_P.txt' %(self.dataDir),\
self.psdSize, self.deltaF, self.f_low, is_asd_file=True)
match_psd_size = int(256 * self.sampleRate / 2.) + 1
self.psd_for_match = pycbc.psd.from_txt\
('%sZERO_DET_high_P.txt' %(self.dataDir), match_psd_size,
1./256., self.f_low, is_asd_file=True)
metricParams = pycbc.tmpltbank.metricParameters(self.pnOrder,\
self.f_low, self.f_upper, self.deltaF, self.f0)
metricParams.psd = self.psd
massRangeParams = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
ns_bh_boundary_mass=self.ns_bh_boundary_mass)
# And again with the nsbh flag
massRangeParams2 = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
nsbhFlag=True)
metricParams = pycbc.tmpltbank.determine_eigen_directions(metricParams)
vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\
metricParams, self.f_upper, covary=False)
cov = numpy.cov(vals)
_,self.evecsCV = numpy.linalg.eig(cov)
metricParams.evecsCV = {}
metricParams.evecsCV[self.f_upper] = self.evecsCV
vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\
metricParams, self.f_upper, covary=False)
self.metricParams = metricParams
self.massRangeParams = massRangeParams
self.massRangeParams2 = massRangeParams2
self.ethincaParams = pycbc.tmpltbank.ethincaParameters(
self.ethincaOrder, self.ethincaCutoff, self.ethincaFreqStep,
full_ethinca=False, time_ethinca=False)
self.xis = vals
def test_eigen_directions(self):
evalsStock = Array(numpy.loadtxt('%sstockEvals.dat'%(self.dataDir)))
evecsStock = Array(numpy.loadtxt('%sstockEvecs.dat'%(self.dataDir)))
maxEval = max(evalsStock)
evalsCurr = Array(self.metricParams.evals[self.f_upper])
evecsCurr = Array(self.metricParams.evecs[self.f_upper])
numpy.savetxt('newEvals.dat', evalsCurr)
numpy.savetxt('newEvecs.dat', evecsCurr)
errMsg = "pycbc.tmpltbank.determine_eigen_directions has failed "
errMsg += "sanity check."
evalsDiff = abs(evalsCurr - evalsStock)/maxEval
self.assertTrue(not (evalsDiff > 1E-5).any(), msg=errMsg)
for stock,test in zip(evecsStock.data,evecsCurr.data):
stockScaled = stock * evalsCurr.data**0.5
testScaled = test * evalsCurr.data**0.5
diff = stockScaled - testScaled
self.assertTrue(not (diff > 1E-4).any(), msg=errMsg)
def test_get_random_mass(self):
# Want to do this for a variety of mass combinations
for i in update_mass_parameters(self):
curr_min_mass = self.min_total_mass
curr_max_mass = self.max_total_mass
try:
pycbc.tmpltbank.verify_mass_range_options(self, parser=parser)
except ValueError:
# Some of the inputs are unphysical and will fail.
# These cases are known to fail, the inputs are unphysical
# 35 has inconsistent total mass and eta restrictions
# 38 Component mass, [upper] chirp mass and [lower] eta limits
# rule out the entire space.
# 41 Same as 38
# 44 Same as 38
# 62 From component mass and total mass limits only total masses
# between 7 and 7.5 are possible. This range all has eta
# lower than the limit of 0.17.
# 65 Same as 38
# 68 Same as 38
# 71 Same as 38
# 80 Same as 62
if i in [35,38,41,44,62,65,68,71,80]:
continue
raise
# Check that if the mass limits have changed, it was right to do so
# This is not exhaustive, but gets most things
if not self.min_total_mass == curr_min_mass:
min_comp_mass = self.min_mass1 + self.min_mass2
min_eta = self.min_mass1 * self.min_mass2 /\
(min_comp_mass * min_comp_mass)
min_chirp_mass = min_comp_mass * min_eta**(3./5.)
if self.min_total_mass == min_comp_mass:
# Okay, the total mass is changed by the components
pass
elif (self.min_eta and min_eta < self.min_eta) or \
(self.max_eta and min_eta > self.max_eta):
# Okay, not possible from eta
pass
elif min_chirp_mass < self.min_chirp_mass:
# Okay, not possible from chirp mass
pass
else:
err_msg = "Minimum total mass changed unexpectedly."
print(self.min_total_mass, curr_min_mass)
print(self.min_mass1, self.min_mass2, min_comp_mass)
print(min_eta, self.min_eta, self.max_eta)
print(min_chirp_mass, self.min_chirp_mass)
self.fail(err_msg)
if not self.max_total_mass == curr_max_mass:
max_comp_mass = self.max_mass1 + self.max_mass2
max_eta = self.max_mass1 * self.max_mass2 /\
(max_comp_mass * max_comp_mass)
max_chirp_mass = max_comp_mass * max_eta**(3./5.)
if self.max_total_mass == max_comp_mass:
# Okay, the total mass is changed by the components
pass
elif (self.min_eta and max_eta < self.min_eta) or\
(self.max_eta and max_eta > self.max_eta):
# Okay, not possible from eta
pass
elif max_chirp_mass > self.max_chirp_mass:
# Okay, not possible from chirp mass
pass
else:
err_msg = "Maximum total mass changed unexpectedly."
self.fail(err_msg)
massRangeParams = pycbc.tmpltbank.massRangeParameters(\
self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
ns_bh_boundary_mass=self.ns_bh_boundary_mass)
# And again with the nsbh flag
massRangeParams2 = pycbc.tmpltbank.massRangeParameters(\
self.min_mass1,\
self.max_mass1, self.min_mass2, self.max_mass2,\
maxNSSpinMag=self.max_ns_spin_mag,\
maxBHSpinMag=self.max_bh_spin_mag,\
maxTotMass=self.max_total_mass,\
minTotMass=self.min_total_mass,\
max_chirp_mass=self.max_chirp_mass,\
min_chirp_mass=self.min_chirp_mass,\
maxEta=self.max_eta,\
minEta=self.min_eta,\
nsbhFlag=True)
mass1, mass2, spin1z, spin2z = \
pycbc.tmpltbank.get_random_mass(100000, massRangeParams)
mass = mass1 + mass2
errMsg = "pycbc.tmpltbank.get_random_mass returns invalid ranges."
self.assertTrue(not (mass < self.min_total_mass).any(),msg=errMsg)
self.assertTrue(not (mass > self.max_total_mass).any(),msg=errMsg)
self.assertTrue(not (mass1 > self.max_mass1 * 1.001).any(),
msg=errMsg)
self.assertTrue(not (mass1 < self.min_mass1 * 0.999).any(),
msg=errMsg)
self.assertTrue(not (mass2 > self.max_mass2 * 1.001).any(),
msg=errMsg)
self.assertTrue(not (mass2 < self.min_mass2 * 0.999).any(),
msg=errMsg)
self.assertTrue(not (mass1 < mass2).any(),msg=errMsg)
# Chirp mass and eta
mchirp, eta = pnutils.mass1_mass2_to_mchirp_eta(mass1,mass2)
if self.max_chirp_mass:
self.assertTrue(not (mchirp > self.max_chirp_mass*1.0001).any(),
msg=errMsg)
if self.min_chirp_mass:
self.assertTrue(not (mchirp < self.min_chirp_mass*0.9999).any(),
msg=errMsg)
if self.min_eta:
self.assertTrue(not (eta < self.min_eta*0.9999).any(),
msg=errMsg)
self.assertTrue(not (eta > self.max_eta*1.0001).any(),
msg=errMsg)
nsSpin1 = spin1z[mass1 < self.ns_bh_boundary_mass]
nsSpin2 = spin2z[mass2 < self.ns_bh_boundary_mass]
bhSpin1 = spin1z[mass1 > self.ns_bh_boundary_mass]
bhSpin2 = spin2z[mass2 > self.ns_bh_boundary_mass]
self.assertTrue(not (abs(nsSpin1) > 0.5).any(), msg=errMsg)
self.assertTrue(not (abs(nsSpin2) > 0.5).any(), msg=errMsg)
self.assertTrue(not (abs(bhSpin1) > 0.9).any(), msg=errMsg)
self.assertTrue(not (abs(bhSpin2) > 0.9).any(), msg=errMsg)
# Check that *some* spins are bigger than 0.5
if len(bhSpin1):
self.assertTrue((abs(bhSpin1) > 0.5).any(), msg=errMsg)
if len(bhSpin2):
self.assertTrue((abs(bhSpin2) > 0.5).any(), msg=errMsg)
# Check nsbh flag
mass1, mass2, spin1z, spin2z = \
pycbc.tmpltbank.get_random_mass(100000, massRangeParams2)
self.assertTrue(not (abs(spin1z) > 0.9).any(), msg=errMsg)
self.assertTrue(not (abs(spin2z) > 0.5).any(), msg=errMsg)
self.assertTrue((abs(spin1z) > 0.5).any(), msg=errMsg)
def test_metric_match_prediction(self):
mass1a, mass2a, spin1za, spin2za = \
pycbc.tmpltbank.get_random_mass(10, self.massRangeParams)
mass1b, mass2b, spin1zb, spin2zb = \
pycbc.tmpltbank.get_random_mass(10, self.massRangeParams)
for idx in range(10):
masses1 = [mass1a[idx], mass2a[idx], spin1za[idx], spin2za[idx]]
masses2 = [mass1b[idx], mass2b[idx], spin1zb[idx], spin2zb[idx]]
dist, _, _ = pycbc.tmpltbank.get_point_distance \
(masses1, masses2, self.metricParams, self.f_upper)
opt_dist = 0.02
while dist > opt_dist * 1.01 or dist < opt_dist * 0.99:
dist_fac = opt_dist / dist
dist_fac = dist_fac**0.5
if dist_fac < 0.01:
dist_fac = 0.01
if dist_fac > 2:
dist_fac = 2
for idx, curr_mass2 in enumerate(masses2):
masses2[idx] = masses1[idx] + \
(curr_mass2 - masses1[idx]) * dist_fac
dist, _, _ = pycbc.tmpltbank.get_point_distance \
(masses1, masses2, self.metricParams, self.f_upper)
self.assertFalse(numpy.isnan(dist))
htilde1, _ = get_fd_waveform\
(approximant='TaylorF2', mass1=masses1[0], mass2=masses1[1],
spin1z=masses1[2], spin2z=masses1[3], delta_f=1.0/256,
f_lower=15, f_final=2000)
htilde2, _ = get_fd_waveform\
(approximant='TaylorF2', mass1=masses2[0], mass2=masses2[1],
spin1z=masses2[2], spin2z=masses2[3], delta_f=1.0/256,
f_lower=15, f_final=2000)
overlap, _ = match(htilde1, htilde2, psd=self.psd_for_match,
low_frequency_cutoff=15)
self.assertTrue(overlap > 0.97 and overlap < 0.985)
def test_chirp_params(self):
chirps=pycbc.tmpltbank.get_chirp_params(2.2, 1.8, 0.2, 0.3,
self.metricParams.f0, self.metricParams.pnOrder)
stockChirps = numpy.loadtxt('%sstockChirps.dat'%(self.dataDir))
diff = (chirps - stockChirps) / stockChirps
errMsg = "Calculated chirp params differ from that expected."
self.assertTrue( not (abs(diff) > 1E-4).any(), msg=errMsg)
def test_hexagonal_placement(self):
arrz = pycbc.tmpltbank.generate_hexagonal_lattice(10, 0, 10, 0, 0.03)
arrz = numpy.array(arrz)
stockGrid = numpy.loadtxt("%sstockHexagonal.dat"%(self.dataDir))
diff = arrz - stockGrid
errMsg = "Calculated lattice differs from that expected."
self.assertTrue( not (diff > 1E-4).any(), msg=errMsg)
def test_anstar_placement(self):
arrz = pycbc.tmpltbank.generate_anstar_3d_lattice(0, 10, 0, 10, 0, \
10, 0.03)
arrz = numpy.array(arrz)
stockGrid = numpy.loadtxt("%sstockAnstar3D.dat"%(self.dataDir))
numpy.savetxt("new_example.dat", arrz)
errMsg = "Calculated lattice differs from that expected."
self.assertTrue(len(arrz) == len(stockGrid), msg=errMsg)
diff = arrz - stockGrid
self.assertTrue( not (diff > 1E-4).any(), msg=errMsg)
def test_get_mass_distribution(self):
# Just run the function, no checking output
pycbc.tmpltbank.get_mass_distribution([1.35,0.239,0.4,-0.2], 2, \
self.massRangeParams, self.metricParams, \
self.f_upper, \
numJumpPoints=123, chirpMassJumpFac=0.0002, \
etaJumpFac=0.009, spin1zJumpFac=0.1, \
spin2zJumpFac=0.2)
def test_get_phys_cov_masses(self):
evecs = self.metricParams.evecs[self.f_upper]
evals = self.metricParams.evals[self.f_upper]
masses1 = [2.2,1.8,0.4,0.3]
masses2 = [2.21,1.79,0.41,0.29]
xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1],
masses1[2], masses1[3], self.metricParams, self.f_upper)
xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1],
masses2[2], masses2[3], self.metricParams, self.f_upper)
testXis = [xis1[0],xis1[1]]
b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0],
masses2[1])
bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]]
bestXis = xis2
output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \
bestMasses, bestXis, 0.0001, self.massRangeParams, \
self.metricParams, self.f_upper)
# Test that returned xis are close enough
diff = (output[6][0] - testXis[0])**2
diff += (output[6][1] - testXis[1])**2
errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses '
errMsg += 'failed to find a point within the desired limits.'
self.assertTrue( diff < 1E-4,msg=errMsg)
# Test that returned masses and xis agree
massT = output[0] + output[1]
etaT = output[0]*output[1] / (massT*massT)
spinSetT = pycbc.pnutils.get_beta_sigma_from_aligned_spins(\
etaT, output[2], output[3])
xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1],
output[2], output[3], self.metricParams, self.f_upper)
errMsg = "Recovered xis do not agree with those expected."
self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg)
# Test again with nsbh flag on
output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \
bestMasses, bestXis, 0.0001, self.massRangeParams2, \
self.metricParams, self.f_upper)
# Test that returned xis are close enough
diff = (output[6][0] - testXis[0])**2
diff += (output[6][1] - testXis[1])**2
errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses '
errMsg += 'failed to find a point within the desired limits.'
self.assertTrue( diff < 1E-4,msg=errMsg)
# Test that returned masses and xis agree
xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1],
output[2], output[3], self.metricParams, self.f_upper)
errMsg = "Recovered xis do not agree with those expected."
self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg)
self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg)
def test_stack_xi_direction(self):
# Just run the function, no checking output
evecs = self.metricParams.evecs[self.f_upper]
evals = self.metricParams.evals[self.f_upper]
masses1 = [2.2,1.8,0.4,0.3]
masses2 = [2.21,1.79,0.41,0.29]
xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1], \
masses1[2], masses1[3], self.metricParams, self.f_upper)
xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1], \
masses2[2], masses2[3], self.metricParams, self.f_upper)
testXis = [xis1[0],xis1[1]]
b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0],
masses2[1])
bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]]
bestXis = xis2
depths = pycbc.tmpltbank.stack_xi_direction_brute(testXis, \
bestMasses, bestXis, 3, 0.03, self.massRangeParams, \
self.metricParams, self.f_upper, numIterations=50)
def test_point_distance(self):
masses1 = [2,2,0.4,0.6]
masses2 = [2.02,1.97,0.41,0.59]
dist, xis1, xis2 = pycbc.tmpltbank.get_point_distance(masses1, \
masses2, self.metricParams, self.f_upper)
diff = abs((dist - 23.3681922039) / dist)
errMsg = "Obtained distance does not agree with expected value."
self.assertTrue( diff < 1E-5, msg=errMsg)
def test_conv_to_sngl(self):
# Just run the function, no checking output
masses1 = [(2,2,0.4,0.3),(4.01,0.249,0.41,0.29)]
pycbc.tmpltbank.convert_to_sngl_inspiral_table(masses1, "a")
def test_ethinca_calc(self):
# Just run the function, no checking output
m1 = 2.
m2 = 2.
s1z = 0.
s2z = 0.
# ethinca calc breaks unless f0 = fLow
self.metricParams.f0 = self.metricParams.fLow
output = pycbc.tmpltbank.calculate_ethinca_metric_comps(
self.metricParams, self.ethincaParams, m1, m2, s1z, s2z)
# restore initial f0 value
self.metricParams.f0 = self.f0
def tearDown(self):
pass
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TmpltbankTestClass))
if __name__ == '__main__':
results = unittest.TextTestRunner(verbosity=2).run(suite)
simple_exit(results)
| gpl-3.0 |
dib-lab/SSUsearch | scripts/taxa-change-ratio-copyrighter.py | 2 | 3379 | #! /usr/bin/env python
# summarise the change in taxa relative abundance from copyrighter.py output tables
# by gjr
"""
Get the taxon relative abudance change ratio after copy correction
% python taxa-change-ratio-copyrighter.py \
level <outfile> \
<file.before.cc.taxonomy> \
<file.after.cc.taxonomy>
"""
import sys, os, itertools, collections
from operator import itemgetter, attrgetter
import numpy
import pandas
EXCLUDE = ['Archaea', 'Eukaryota', 'unknown']
#EXCLUDE = []
TOP=20
#ORDER=True #reverse the order
ORDER=False #normal order
def readData(f):
taxa_lis = []
num_lis = []
for n, line in enumerate(open(f)):
if line.startswith('#'):
continue
line = line.rstrip()
if line == '':
continue
taxa, num = line.split('\t')
skip = False
for word in EXCLUDE:
if word in taxa:
skip = True
break
if skip:
continue
taxa = taxa.rstrip(';')
lis = taxa.split(';')
lis2 = []
for item in lis:
item = item.strip()
if item.endswith(')'):
item = item.split('(')[0].strip()
# remove taxon level prefix, e.g. 'p__Firmicutes'
if '__' in item:
item = item.split('__', 1)[1]
#item = item.strip('"')
item = item.lower()
            # normalise every "unknown" flavour to the same label that is used
            # as the izip_longest fillvalue below ('Unclassified')
            if 'unclassified' in item:
                item = 'Unclassified'
            elif 'unknown' in item:
                item = 'Unclassified'
            elif 'other' in item:
                item = 'Unclassified'
            elif 'unassigned' in item:
                item = 'Unclassified'
item = item.capitalize()
lis2.append(item)
taxa_lis.append(lis2)
num_lis.append(float(num))
return taxa_lis, num_lis
def main():
#Usage: python <thisFile> level <outfile> <file.taxonomy> ..
if len(sys.argv) < 3:
mes = 'Usage: python {} level <outfile> <file.taxonomy>..'
print >> sys.stderr, mes.format(os.path.basename(sys.argv[0]))
print >> sys.stderr, "*** filename.split('.')[0] will "\
"be the sample label"
sys.exit(1)
level = int(sys.argv[1])
level = level - 1
outfile = sys.argv[2]
d = {}
dCombined = {}
lisSampOrder = []
for f in sys.argv[3:]:
samp = os.path.basename(f).split('.')[0] # sample name
container, num_lis = readData(f)
tranLis = itertools.izip_longest(*container, fillvalue='Unclassified')
levelLis = list(tranLis)[level]
countD = {}
for tax, num in zip(levelLis, num_lis):
countD[tax] = countD.get(tax, 0) + num
total = sum(countD.values())
d[samp] = dict((taxa, countD[taxa]*1.0/total) for taxa in countD)
for key in d[samp]:
dCombined[key] = dCombined.get(key, 0) + d[samp][key]
lisSampOrder.append(samp)
df = pandas.DataFrame(d)
# only take members > 0.1% before copy correction
df2 = df[df.iloc[:,0] > 0.001]
df2['ratio'] = df2.iloc[:,0]/df2.iloc[:,1]
df2 = df2.sort(columns=['ratio'], ascending=[0])
df2.to_csv('{}.tsv'.format(outfile), sep='\t', index=False)
if __name__ == '__main__':
main()
| bsd-3-clause |
stinebuu/nest-simulator | pynest/examples/vinit_example.py | 6 | 3191 | # -*- coding: utf-8 -*-
#
# vinit_example.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Initial membrane voltage
----------------------------
Plot several runs of the ``iaf_cond_exp_sfa_rr`` neuron without input for various
initial values of the membrane potential.
References
~~~~~~~~~~~~
.. [1] Dayan, P. and Abbott, L.F. (2001) Theoretical neuroscience,
MIT Press, page 166
"""
###############################################################################
# First, the necessary modules for simulation and plotting are imported.
import nest
import numpy
import matplotlib.pyplot as plt
###############################################################################
# A loop runs over a range of initial membrane voltages.
#
# In the beginning of each iteration, the simulation kernel is put back to
# its initial state using `ResetKernel`.
#
# Next, a neuron is instantiated with ``Create``. The used neuron model
# ``iaf_cond_exp_sfa_rr`` is an implementation of a spiking neuron with
# integrate-and-fire dynamics, conductance-based synapses, an additional
# spike-frequency adaptation and relative refractory mechanisms as described
# in [1]_. Incoming spike events induce a postsynaptic change of
# conductance modeled by an exponential function. ``SetStatus`` allows to
# assign the initial membrane voltage of the current loop run to the neuron.
#
# ``Create`` is used once more to instantiate a ``voltmeter`` as recording device
# which is subsequently connected to the neuron with ``Connect``.
#
# Then, a simulation with a duration of 75 ms is started with ``Simulate``.
#
# When the simulation has finished, the recorded times and membrane voltages
# are read from the voltmeter via ``GetStatus`` where they can be accessed
# through the key ``events`` of the status dictionary.
#
# Finally, the time course of the membrane voltages is plotted for each of
# the different initial values.
for vinit in numpy.arange(-100, -50, 10, float):
nest.ResetKernel()
cbn = nest.Create("iaf_cond_exp_sfa_rr")
nest.SetStatus(cbn, "V_m", vinit)
voltmeter = nest.Create("voltmeter")
nest.Connect(voltmeter, cbn)
nest.Simulate(75.0)
t = voltmeter.get("events", "times")
v = voltmeter.get("events", "V_m")
plt.plot(t, v, label="initial V_m = %.2f mV" % vinit)
###############################################################################
# Set the legend and the labels for the plot outside of the loop.
plt.legend(loc=4)
plt.xlabel("time (ms)")
plt.ylabel("V_m (mV)")
plt.show()
| gpl-2.0 |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/tricontour_smooth_delaunay.py | 3 | 5467 | """
Demonstrates high-resolution tricontouring of a random set of points;
a matplotlib.tri.TriAnalyzer is used to improve the plot quality.
The initial data points and triangular grid for this demo are:
- a set of random points is instantiated, inside [-1, 1] x [-1, 1] square
- A Delaunay triangulation of these points is then computed, of which a
random subset of triangles is masked out by the user (based on
*init_mask_frac* parameter). This simulates invalidated data.
The proposed generic procedure to obtain a high resolution contouring of such
a data set is the following:
1) Compute an extended mask with a matplotlib.tri.TriAnalyzer, which will
exclude badly shaped (flat) triangles from the border of the
triangulation. Apply the mask to the triangulation (using set_mask).
2) Refine and interpolate the data using a
matplotlib.tri.UniformTriRefiner.
3) Plot the refined data with tricontour.
"""
from matplotlib.tri import Triangulation, TriAnalyzer, UniformTriRefiner
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
#-----------------------------------------------------------------------------
# Analytical test function
#-----------------------------------------------------------------------------
def experiment_res(x, y):
""" An analytic function representing experiment results """
x = 2.*x
r1 = np.sqrt((0.5-x)**2 + (0.5-y)**2)
theta1 = np.arctan2(0.5-x, 0.5-y)
r2 = np.sqrt((-x-0.2)**2 + (-y-0.2)**2)
theta2 = np.arctan2(-x-0.2, -y-0.2)
z = (4*(np.exp((r1/10)**2)-1)*30. * np.cos(3*theta1) +
(np.exp((r2/10)**2)-1)*30. * np.cos(5*theta2) +
2*(x**2 + y**2))
return (np.max(z)-z)/(np.max(z)-np.min(z))
#-----------------------------------------------------------------------------
# Generating the initial data test points and triangulation for the demo
#-----------------------------------------------------------------------------
# User parameters for data test points
n_test = 200 # Number of test data points, tested from 3 to 5000 for subdiv=3
subdiv = 3 # Number of recursive subdivisions of the initial mesh for smooth
# plots. Values >3 might result in a very high number of triangles
# for the refine mesh: new triangles numbering = (4**subdiv)*ntri
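            # (illustrative: with an initial mesh of a few hundred triangles,
            #  subdiv=3 multiplies that by 4**3 = 64, i.e. tens of thousands
            #  of refined triangles)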
init_mask_frac = 0.0 # Float > 0. adjusting the proportion of
# (invalid) initial triangles which will be masked
# out. Enter 0 for no mask.
min_circle_ratio = .01 # Minimum circle ratio - border triangles with circle
# ratio below this will be masked if they touch a
# border. Suggested value 0.01 ; Use -1 to keep
# all triangles.
# Random points
random_gen = np.random.mtrand.RandomState(seed=127260)
x_test = random_gen.uniform(-1., 1., size=n_test)
y_test = random_gen.uniform(-1., 1., size=n_test)
z_test = experiment_res(x_test, y_test)
# meshing with Delaunay triangulation
tri = Triangulation(x_test, y_test)
ntri = tri.triangles.shape[0]
# Some invalid data are masked out
mask_init = np.zeros(ntri, dtype=np.bool)
masked_tri = random_gen.randint(0, ntri, int(ntri*init_mask_frac))
mask_init[masked_tri] = True
tri.set_mask(mask_init)
#-----------------------------------------------------------------------------
# Improving the triangulation before high-res plots: removing flat triangles
#-----------------------------------------------------------------------------
# masking badly shaped triangles at the border of the triangular mesh.
mask = TriAnalyzer(tri).get_flat_tri_mask(min_circle_ratio)
tri.set_mask(mask)
# refining the data
refiner = UniformTriRefiner(tri)
tri_refi, z_test_refi = refiner.refine_field(z_test, subdiv=subdiv)
# analytical 'results' for comparison
z_expected = experiment_res(tri_refi.x, tri_refi.y)
# for the demo: loading the 'flat' triangles for plot
flat_tri = Triangulation(x_test, y_test)
flat_tri.set_mask(~mask)
#-----------------------------------------------------------------------------
# Now the plots
#-----------------------------------------------------------------------------
# User options for plots
plot_tri = True # plot of the base triangulation
plot_masked_tri = True # plot of the excessively flat excluded triangles
plot_refi_tri = False # plot of the refined triangulation
plot_expected = False # plot of the analytical function values for comparison
# Graphical options for tricontouring
levels = np.arange(0., 1., 0.025)
cmap = cm.get_cmap(name='Blues', lut=None)
plt.figure()
plt.gca().set_aspect('equal')
plt.title("Filtering a Delaunay mesh\n" +
"(application to high-resolution tricontouring)")
# 1) plot of the refined (computed) data contours:
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 0.5, 1.0, 0.5])
# 2) plot of the expected (analytical) data contours (dashed):
if plot_expected:
plt.tricontour(tri_refi, z_expected, levels=levels, cmap=cmap,
linestyles='--')
# 3) plot of the fine mesh on which interpolation was done:
if plot_refi_tri:
plt.triplot(tri_refi, color='0.97')
# 4) plot of the initial 'coarse' mesh:
if plot_tri:
plt.triplot(tri, color='0.7')
# 5) plot of the invalidated (excluded) triangles from the naive Delaunay triangulation:
if plot_masked_tri:
plt.triplot(flat_tri, color='red')
plt.show()
| mit |
Tjorriemorrie/trading | 06_randomforests/backtest-1d.py | 1 | 1779 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.cross_validation import cross_val_score
from features import FeatureFactory
import sklearn as sk
from pprint import pprint
currencies = [
'AUDUSD',
'EURGBP',
'EURJPY',
'EURUSD',
'GBPJPY',
'GBPUSD',
'NZDUSD',
'USDCAD',
'USDCHF',
'USDJPY',
]
total = {}
for currency in currencies:
print '\n' + currency
# load data
df = pd.read_csv(
r'../data/' + currency + '1440.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
)
data = df.as_matrix()
    # backtest on the last 40% of the data (the model was trained on the first 60%)
data = data[-int(len(data) * 0.40):]
opens = data[:, 2].astype(float)
highs = data[:, 3].astype(float)
lows = data[:, 4].astype(float)
closes = data[:, 5].astype(float)
volumes = data[:, 6].astype(int)
rfc = sk.externals.joblib.load('models/' + currency + '.pkl')
# calculating features
ff = FeatureFactory()
X_scaled = ff.getFeatures(opens, highs, lows, closes, volumes)
# set rewards
# print '\ncalculating rewards...'
rewards = ff.getRewards(closes)
# simulate
action = 'none'
trades = []
for i, x in enumerate(X_scaled):
pos = i
# actual (what happened tomorrow)
reward = rewards[pos]
# predict
predict = rfc.predict(x)[0]
goal = True if predict == reward else False
print data[pos][0], 'p:', predict, 'a:', reward
trades.append(goal)
wins = sum([1. for t in trades if t])
ratio = wins / len(trades)
total[currency] = ratio
print 'Ratio', ratio
print '\n\n'
for cur, ratio in total.iteritems():
print cur, int(ratio * 100)
| mit |
tejaskhot/ConvAE-DeSTIN | scripts/convae_destin_3.py | 3 | 8387 | """Stacked fixed noise dConvAE test"""
import sys
sys.path.append("..")
import numpy as np
import matplotlib.pyplot as plt
import cPickle as pickle
import theano
import theano.tensor as T
import scae_destin.datasets as ds
from scae_destin.fflayers import ReLULayer
from scae_destin.fflayers import SoftmaxLayer
from scae_destin.convnet import ReLUConvLayer
from scae_destin.convnet import SigmoidConvLayer
from scae_destin.model import ConvAutoEncoder
from scae_destin.convnet import MaxPoolingSameSize, MaxPooling
from scae_destin.convnet import Flattener
from scae_destin.model import FeedForward
from scae_destin.optimize import gd_updates
from scae_destin.cost import mean_square_cost
from scae_destin.cost import categorical_cross_entropy_cost
from scae_destin.cost import L2_regularization
n_epochs=100
batch_size=100
nkerns=100
Xtr, Ytr, Xte, Yte=ds.load_CIFAR10("../cifar-10-batches-py/")
Xtr=np.mean(Xtr, 3)
Xte=np.mean(Xte, 3)
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])/255.0
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])/255.0
train_set_x, train_set_y=ds.shared_dataset((Xtrain, Ytr))
test_set_x, test_set_y=ds.shared_dataset((Xtest, Yte))
n_train_batches=train_set_x.get_value(borrow=True).shape[0]/batch_size
n_test_batches=test_set_x.get_value(borrow=True).shape[0]/batch_size
print "[MESSAGE] The data is loaded"
################################## FIRST LAYER #######################################
X=T.matrix("data")
y=T.ivector("label")
idx=T.lscalar()
corruption_level=T.fscalar()
images=X.reshape((batch_size, 1, 32, 32))
layer_0_en=ReLUConvLayer(filter_size=(4,4),
num_filters=50,
num_channels=1,
fm_size=(32,32),
batch_size=batch_size,
border_mode="same")
layer_0_de=SigmoidConvLayer(filter_size=(4,4),
num_filters=1,
num_channels=50,
fm_size=(32,32),
batch_size=batch_size,
border_mode="same")
layer_1_en=ReLUConvLayer(filter_size=(2,2),
num_filters=50,
num_channels=50,
fm_size=(8,8),
batch_size=batch_size,
border_mode="same")
layer_1_de=SigmoidConvLayer(filter_size=(2,2),
num_filters=50,
num_channels=50,
fm_size=(8,8),
batch_size=batch_size,
border_mode="same")
model_0=ConvAutoEncoder(layers=[layer_0_en, MaxPoolingSameSize(pool_size=(4,4)), layer_0_de])
out_0=model_0.fprop(images, corruption_level=corruption_level)
cost_0=mean_square_cost(out_0[-1], images)+L2_regularization(model_0.params, 0.005)
updates_0=gd_updates(cost=cost_0, params=model_0.params, method="sgd", learning_rate=0.1)
## append a max-pooling layer
model_trans=FeedForward(layers=[layer_0_en, MaxPooling(pool_size=(4,4))]);
out_trans=model_trans.fprop(images);
model_1=ConvAutoEncoder(layers=[layer_1_en, MaxPoolingSameSize(pool_size=(2,2)), layer_1_de])
out_1=model_1.fprop(out_trans[-1], corruption_level=corruption_level)
cost_1=mean_square_cost(out_1[-1], out_trans[-1])+L2_regularization(model_1.params, 0.005)
updates_1=gd_updates(cost=cost_1, params=model_1.params, method="sgd", learning_rate=0.1)
train_0=theano.function(inputs=[idx, corruption_level],
outputs=[cost_0],
updates=updates_0,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
train_1=theano.function(inputs=[idx, corruption_level],
outputs=[cost_1],
updates=updates_1,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size]})
print "[MESSAGE] The 2-layer model is built"
corr={}
# keep one independent corruption-level array per layer; the original chained
# assignment made both entries alias the same array, so updating one layer's
# best value silently changed the other's
corr[0]=np.random.uniform(low=0.1, high=0.3, size=1).astype("float32")
corr[1]=corr[0].copy()
min_cost={0:None,
          1:None}
corr_best={0:corr[0].copy(),
           1:corr[1].copy()}
max_iter={0:0,
1:0}
epoch = 0
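# Adaptive corruption schedule for the two denoising layers: whenever a layer's
# epoch-mean reconstruction cost drops below half of its previous best (or 20
# epochs pass without that happening), the current corruption level is accepted
# as corr_best and a new level is drawn from [corr_best, corr_best + 0.1].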
while (epoch < n_epochs):
epoch = epoch + 1
    c_0, c_1 = [], []  # separate lists; `c_0 = c_1 = []` would alias them
for batch_index in xrange(n_train_batches):
for rep in xrange(8):
train_cost=train_1(batch_index, corr_best[1][0])
c_1.append(train_cost)
train_cost=train_0(batch_index, corr_best[0][0])
c_0.append(train_cost)
if min_cost[0]==None:
min_cost[0]=np.mean(c_0)
else:
if (np.mean(c_0)<min_cost[0]*0.5) or (max_iter[0]>=20):
min_cost[0]=np.mean(c_0)
corr_best[0][0]=corr[0]
corr[0]=np.random.uniform(low=corr_best[0][0], high=corr_best[0][0]+0.1, size=1).astype("float32")
max_iter[0]=0
else:
max_iter[0]+=1
if min_cost[1]==None:
min_cost[1]=np.mean(c_1)
else:
if (np.mean(c_1)<min_cost[1]*0.5) or (max_iter[1]>=20):
min_cost[1]=np.mean(c_1)
corr_best[1][0]=corr[1]
corr[1]=np.random.uniform(low=corr_best[1][0], high=corr_best[1][0]+0.1, size=1).astype("float32")
max_iter[1]=0
else:
max_iter[1]+=1
print 'Training epoch %d, cost ' % epoch, np.mean(c_0), str(corr_best[0][0]), min_cost[0], max_iter[0]
print ' ', np.mean(c_1), str(corr_best[1][0]), min_cost[1], max_iter[1]
print "[MESSAGE] The model is trained"
################################## BUILD SUPERVISED MODEL #######################################
pool_0=MaxPooling(pool_size=(4,4));
pool_1=MaxPooling(pool_size=(2,2));
flattener=Flattener()
layer_2=ReLULayer(in_dim=50*4*4,
out_dim=400)
layer_3=SoftmaxLayer(in_dim=400,
out_dim=10)
model_sup=FeedForward(layers=[layer_0_en, pool_0, layer_1_en, pool_1, flattener, layer_2, layer_3])
out_sup=model_sup.fprop(images)
cost_sup=categorical_cross_entropy_cost(out_sup[-1], y)
updates=gd_updates(cost=cost_sup, params=model_sup.params, method="sgd", learning_rate=0.1)
train_sup=theano.function(inputs=[idx],
outputs=cost_sup,
updates=updates,
givens={X: train_set_x[idx * batch_size: (idx + 1) * batch_size],
y: train_set_y[idx * batch_size: (idx + 1) * batch_size]})
test_sup=theano.function(inputs=[idx],
outputs=model_sup.layers[-1].error(out_sup[-1], y),
givens={X: test_set_x[idx * batch_size: (idx + 1) * batch_size],
y: test_set_y[idx * batch_size: (idx + 1) * batch_size]})
print "[MESSAGE] The supervised model is built"
n_epochs=100
test_record=np.zeros((n_epochs, 1))
epoch = 0
while (epoch < n_epochs):
epoch+=1
for minibatch_index in xrange(n_train_batches):
mlp_minibatch_avg_cost = train_sup(minibatch_index)
iteration = (epoch - 1) * n_train_batches + minibatch_index
if (iteration + 1) % n_train_batches == 0:
print 'MLP MODEL'
test_losses = [test_sup(i) for i in xrange(n_test_batches)]
test_record[epoch-1] = np.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error %f %%') %
(epoch, minibatch_index + 1, n_train_batches, test_record[epoch-1] * 100.))
# filters=[]
# filters.append(model_sup.layers[0].filters.get_value(borrow=True))
# filters.append(model_sup.layers[1].filters.get_value(borrow=True))
# filters.append(model_sup.layers[2].filters.get_value(borrow=True))
# filters.append(model_sup.layers[3].filters.get_value(borrow=True))
filters=model_1.layers[0].filters.get_value(borrow=True);
pickle.dump(test_record, open("convae_destin_100epochs_maxpooling_BtoT.pkl", "w"))
for i in xrange(50):
image_adr="convae_destin/layer_filter_%d.eps" % (i)
plt.imshow(filters[i, 0, :, :], cmap = plt.get_cmap('gray'), interpolation='nearest')
plt.axis('off')
plt.savefig(image_adr , bbox_inches='tight', pad_inches=0) | apache-2.0 |
gfyoung/pandas | pandas/tests/scalar/interval/test_ops.py | 4 | 2336 | """Tests for Interval-Interval operations, such as overlaps, contains, etc."""
import pytest
from pandas import Interval, Timedelta, Timestamp
@pytest.fixture(
params=[
(Timedelta("0 days"), Timedelta("1 day")),
(Timestamp("2018-01-01"), Timedelta("1 day")),
(0, 1),
],
ids=lambda x: type(x[0]).__name__,
)
def start_shift(request):
"""
Fixture for generating intervals of types from a start value and a shift
value that can be added to start to generate an endpoint
"""
return request.param
class TestOverlaps:
def test_overlaps_self(self, start_shift, closed):
start, shift = start_shift
interval = Interval(start, start + shift, closed)
assert interval.overlaps(interval)
def test_overlaps_nested(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + 3 * shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# nested intervals should always overlap
assert interval1.overlaps(interval2)
def test_overlaps_disjoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + 2 * shift, start + 3 * shift, closed)
# disjoint intervals should never overlap
assert not interval1.overlaps(interval2)
def test_overlaps_endpoint(self, start_shift, closed, other_closed):
start, shift = start_shift
interval1 = Interval(start, start + shift, other_closed)
interval2 = Interval(start + shift, start + 2 * shift, closed)
# overlap if shared endpoint is closed for both (overlap at a point)
result = interval1.overlaps(interval2)
expected = interval1.closed_right and interval2.closed_left
assert result == expected
@pytest.mark.parametrize(
"other",
[10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")],
ids=lambda x: type(x).__name__,
)
def test_overlaps_invalid_type(self, other):
interval = Interval(0, 1)
msg = f"`other` must be an Interval, got {type(other).__name__}"
with pytest.raises(TypeError, match=msg):
interval.overlaps(other)
| bsd-3-clause |
michigraber/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
barentsen/dave | detrendThis/makeplot.py | 3 | 2223 | from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import pyfits
import glob
import extract_lc
import progressbar
params = {'backend': 'png',
'axes.linewidth': 2.5,
'axes.labelsize': 24,
'axes.font': 'sans-serif',
'axes.fontweight' : 'bold',
'text.fontsize': 12,
'legend.fontsize': 16,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
# 'text.usetex': True,
# 'font.family': 'sans-serif',
'font.sans-serif': 'Helvetica',
'ps.useafm': True,
'pdf.use14corefonts': True,
'ps.fonttype': 42,
'legend.markersize': 200}
plt.rcParams.update(params)
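# rolling_window returns a zero-copy strided view whose rows are consecutive
# overlapping windows of length `window`; the loop below uses it for a CDPP-like
# noise metric: the median over all 13-point windows of std/sqrt(13), in ppm
# (roughly the 6.5-hour CDPP if the light-curve cadence is ~30 minutes).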
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
if __name__ == '__main__':
dirname = '/Users/tom/Projects/Debra_data/data/lcfiles/'
files = glob.glob(dirname + '*.txt')
cdpparr = np.zeros(len(files))
bar = progressbar.ProgressBar(maxval=len(files), \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(),
progressbar.ETA()])
bar.start()
for i,f in enumerate(files):
bar.update(i+1)
fig, (ax1,ax2,ax3) = plt.subplots(3,1, sharex=True, sharey=True,
figsize=[9,12])
t = np.genfromtxt(f).T
ax1.plot(t[0],t[1],c='r')
ax2.plot(t[0],t[2],c='b')
ax3.plot(t[0],t[3],c='g')
#calc CDPP
cdpp = 1.E6 * np.median(
np.std(rolling_window(t[2], 13) / np.sqrt(13), 1))
cdpparr[i] = cdpp
ax1.set_xlim([45.5,77.0])
ax1.set_title('Decor lc,')
ax2.set_title('Decor + medfilt lc, CDPP = {}'.format(cdpp))
ax3.set_title('Decor signal')
plt.tight_layout()
savename = f.split('/')[-1].split('.')[0]
plt.savefig('{}../figs/{}.png'.format(dirname,savename))
plt.close('all')
bar.finish()
np.savetxt('cdpp.txt',np.array([files,cdpparr],dtype=None).T,fmt='%s')
| mit |
zhenv5/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 4 | 3337 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.autoencoder import TensorFlowDNNAutoencoder
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowBaseTransformer
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.contrib.learn.python.learn.estimators.classifier import Classifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import TensorFlowDNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowLinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.linear import TensorFlowRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestEstimator
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
JAOSP/aosp_platform_external_chromium_org | chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause |
MartinDelzant/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilties closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
josauder/procedural_city_generation | procedural_city_generation/polygons/plot_poly.py | 2 | 1072 | import matplotlib.pyplot as plt
def plot_edge(edge, color="k"):
plt.plot((edge[0][0], edge[1][0]), (edge[0][1], edge[1][1]), color=color)
"""
def plot_self(self, mode="type"):
if mode == "type":
t = self.self_type
if t == "lot" or t == "block":
for edge in self.edges:
plot_edge(edge, "g")
elif t == "road":
for edge in self.edges:
plot_edge(edge, "k")
else:
for edge in self.edges:
plot_edge(edge, "r")
elif mode == "borders":
for edge in self.edges:
if edge.bordering_road:
plot_edge(edge, 'k')
else:
plot_edge(edge, 'r')
else:
for edge in self.edges:
plot_edge(edge, mode)
"""
def plot_self(self):
color="r"
t=self.self_type
if t == "lot" or t == "block":
color="g"
elif t == "road":
color="k"
composite=np.array([edge for edge in self.edges]+[self.edges[0]])
plt.plot(composite[:, 0], composite[:, 1], color)
| mpl-2.0 |
rl-institut/reegis-hp | reegis_hp/experimental/berlin_brdbg_example_plot.py | 7 | 3580 | #!/usr/bin/python3
# -*- coding: utf-8
import logging
import matplotlib.pyplot as plt
from oemof.outputlib import to_pandas as tpd
from oemof.tools import logger
from oemof.core import energy_system as es
# The following dictionaries are a workaround due to issue #26
rename = {
"(val, ('sink', 'Landkreis Wittenberg', 'elec'))": "elec demand",
"(val, ('sto_simple', 'Landkreis Wittenberg', 'elec'))": "battery",
"(val, ('transport', 'bus', 'Stadt Dessau-Rosslau', 'elec', 'bus', 'Landkreis Wittenberg', 'elec'))": "to Dessau",
"(val, ('FixedSrc', 'Landkreis Wittenberg', 'pv_pwr'))": "pv power",
"(val, ('FixedSrc', 'Landkreis Wittenberg', 'wind_pwr'))": "wind power",
"(val, ('transformer', 'Landkreis Wittenberg', 'natural_gas'))": "gas power plant",
"(val, ('transport', 'bus', 'Landkreis Wittenberg', 'elec', 'bus', 'Stadt Dessau-Rosslau', 'elec'))": "to Wittenberg",
"(val, ('sink', 'Stadt Dessau-Rosslau', 'elec'))": "elec demand",
"(val, ('sto_simple', 'Stadt Dessau-Rosslau', 'elec'))": "battery",
"(val, ('FixedSrc', 'Stadt Dessau-Rosslau', 'pv_pwr'))": "pv power",
"(val, ('FixedSrc', 'Stadt Dessau-Rosslau', 'wind_pwr'))": "wind power",
"(val, ('transformer', 'Stadt Dessau-Rosslau', 'lignite'))": "lignite power plant",
"(val, ('transformer', 'Stadt Dessau-Rosslau', 'natural_gas'))": "gas power plant",
}
# Define a color set for the plots.
cdict = {}
cdict["('FixedSrc', 'Landkreis Wittenberg', 'wind_pwr')"] = '#4536bb'
cdict["('FixedSrc', 'Landkreis Wittenberg', 'pv_pwr')"] = '#ffcc00'
cdict["('FixedSrc', 'Stadt Dessau-Rosslau', 'wind_pwr')"] = '#4536bb'
cdict["('FixedSrc', 'Stadt Dessau-Rosslau', 'pv_pwr')"] = '#ffcc00'
cdict["('transport', 'bus', 'Landkreis Wittenberg', 'elec', 'bus', 'Stadt Dessau-Rosslau', 'elec')"] = '#643780'
cdict["('transport', 'bus', 'Stadt Dessau-Rosslau', 'elec', 'bus', 'Landkreis Wittenberg', 'elec')"] = '#643780'
cdict["('transformer', 'Landkreis Wittenberg', 'natural_gas')"] = '#7c7c7c'
cdict["('transformer', 'Stadt Dessau-Rosslau', 'natural_gas')"] = '#7c7c7c'
cdict["('transformer', 'Landkreis Wittenberg', 'lignite')"] = '#000000'
cdict["('transformer', 'Stadt Dessau-Rosslau', 'lignite')"] = '#000000'
cdict["('sto_simple', 'Landkreis Wittenberg', 'elec')"] = '#ff5e5e'
cdict["('sto_simple', 'Stadt Dessau-Rosslau', 'elec')"] = '#ff5e5e'
cdict["('sink', 'Landkreis Wittenberg', 'elec')"] = '#0cce1e'
cdict["('sink', 'Stadt Dessau-Rosslau', 'elec')"] = '#0cce1e'
# Define the oemof default logger
logger.define_logging()
# Create an energy system
TwoRegExample = es.EnergySystem()
# Restoring a dumped EnergySystem
logging.info(TwoRegExample.restore())
esplot = tpd.DataFramePlot(energy_system=TwoRegExample)
fig = plt.figure(figsize=(24, 14))
plt.rc('legend', **{'fontsize': 19})
plt.rcParams.update({'font.size': 14})
plt.style.use('ggplot')
n = 1
# Loop over the regions to plot them.
for region in TwoRegExample.regions:
uid = str(('bus', region.name, 'elec'))
esplot.ax = fig.add_subplot(2, 1, n)
n += 1
handles, labels = esplot.io_plot(
uid, cdict,
date_from="2010-06-01 00:00:00",
date_to="2010-06-8 00:00:00",
line_kwa={'linewidth': 4})
new_labels = []
for lab in labels:
new_labels.append(rename.get(str(lab), lab))
esplot.ax.set_ylabel('Power in MW')
esplot.ax.set_xlabel('')
esplot.ax.set_title(region.name)
esplot.set_datetime_ticks(tick_distance=24, date_format='%d-%m-%Y')
esplot.outside_legend(handles=handles, labels=new_labels)
plt.show()
| gpl-3.0 |
liberatorqjw/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 9 | 11108 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import check_scoring
from sklearn.metrics import make_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'roc_auc', 'average_precision', 'precision',
'recall', 'log_loss',
'adjusted_rand_score' # not really, but works
]
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
def test_check_scoring():
"""Test all branches of check_scoring"""
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
pattern = (r"The estimator passed should have a 'score'"
r" or a 'predict' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator,
"accuracy")
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_make_scorer():
"""Sanity check on the make_scorer factory function."""
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
"""Test classification scorers."""
X, y = make_blobs(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['f1'](clf, X_test, y_test)
score2 = f1_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
"""Test regression scorers."""
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = SCORERS['r2'](clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
"""Test scorers that take thresholds."""
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = SCORERS['log_loss'](clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, SCORERS['roc_auc'], clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
"""Test that the scorer work with multilabel-indicator format
for multilabel and multi-output multi-class classifier
"""
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = SCORERS['roc_auc'](clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
"""Test clustering scorers against gold standard labeling."""
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = SCORERS['adjusted_rand_score'](km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
"""Test that when a list of scores is returned, we raise proper errors."""
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
def test_scorer_sample_weight():
"""Test that scorers support sample_weight or raise sensible errors"""
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier()
sensible_clf.fit(X_train, y_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS])
for name, scorer in SCORERS.items():
try:
weighted = scorer(estimator[name], X_test, y_test,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], y_test[10:])
unweighted = scorer(estimator[name], X_test, y_test)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
AmurG/tardis | tardis/io/config_reader.py | 3 | 39949 | # Module to read the rather complex config data
import logging
import os
import pprint
from astropy import constants, units as u
import numpy as np
import pandas as pd
import yaml
import tardis
from tardis.io.model_reader import read_density_file, \
calculate_density_after_time, read_abundances_file
from tardis.io.config_validator import ConfigurationValidator
from tardis import atomic
from tardis.util import species_string_to_tuple, parse_quantity, \
element_symbol2atomic_number
import copy
pp = pprint.PrettyPrinter(indent=4)
logger = logging.getLogger(__name__)
data_dir = os.path.join(tardis.__path__[0], 'data')
default_config_definition_file = os.path.join(data_dir,
'tardis_config_definition.yml')
#File parsers for different file formats:
density_structure_fileparser = {}
inv_ni56_efolding_time = 1 / (8.8 * u.day)
inv_co56_efolding_time = 1 / (113.7 * u.day)
inv_cr48_efolding_time = 1 / (1.29602 * u.day)
inv_v48_efolding_time = 1 / (23.0442 * u.day)
inv_fe52_efolding_time = 1 / (0.497429 * u.day)
inv_mn52_efolding_time = 1 / (0.0211395 * u.day)
class ConfigurationError(ValueError):
pass
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
"""
parse a dictionary of the following kind
{'start': 5000 km/s,
'stop': 10000 km/s,
'num': 1000}
Parameters
----------
quantity_linspace_dictionary: ~dict
add_one: boolean, default: True
Returns
-------
~np.array
"""
start = parse_quantity(quantity_linspace_dictionary['start'])
stop = parse_quantity(quantity_linspace_dictionary['stop'])
try:
stop = stop.to(start.unit)
except u.UnitsError:
raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')
num = quantity_linspace_dictionary['num']
if add_one:
num += 1
return np.linspace(start.value, stop.value, num=num) * start.unit
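# Illustrative use of parse_quantity_linspace (not part of the original module;
# the numbers below are made up):
#
#     velocity_grid = parse_quantity_linspace({'start': '1.1e4 km/s',
#                                              'stop': '2e4 km/s',
#                                              'num': 20})
#     # -> astropy Quantity of 21 points (add_one=True) between 1.1e4 and 2e4 km/s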
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())
spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)
return spectrum_start_wavelength, spectrum_end_wavelength
def calculate_exponential_density(velocities, v_0, rho0):
"""
This function computes the exponential density profile.
:math:`\\rho = \\rho_0 \\times \\exp \\left( -\\frac{v}{v_0} \\right)`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
    v_0 : ~astropy.Quantity
reference velocity
rho0 : ~astropy.Quantity
reference density
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho0 * np.exp(-(velocities / v_0))
return densities
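# Minimal sketch of calculate_exponential_density (illustrative values only):
#
#     velocities = u.Quantity([1.0e4, 1.5e4, 2.0e4], 'km/s')
#     densities = calculate_exponential_density(velocities,
#                                               v_0=u.Quantity(1.0e4, 'km/s'),
#                                               rho0=u.Quantity(1e-13, 'g/cm^3'))
#     # each shell gets rho0 * exp(-v / v_0)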
def calculate_power_law_density(velocities, velocity_0, rho_0, exponent):
"""
    This function computes a power-law density profile on a discrete velocity grid.
:math:`\\rho = \\rho_0 \\times \\left( \\frac{v}{v_0} \\right)^n`
Parameters
----------
velocities : ~astropy.Quantity
Array like velocity profile
velocity_0 : ~astropy.Quantity
reference velocity
    rho_0 : ~astropy.Quantity
reference density
exponent : ~float
exponent used in the powerlaw
Returns
-------
densities : ~astropy.Quantity
"""
densities = rho_0 * np.power((velocities / velocity_0), exponent)
return densities
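# Minimal sketch of calculate_power_law_density; exponent=-7 reproduces the
# branch85/W7-style profile used elsewhere in this module (illustrative values only):
#
#     densities = calculate_power_law_density(velocities,
#                                             velocity_0=u.Quantity(1.0e4, 'km/s'),
#                                             rho_0=u.Quantity(3e-14, 'g/cm^3'),
#                                             exponent=-7)
#     # each shell gets rho_0 * (v / v_0) ** exponent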
def parse_model_file_section(model_setup_file_dict, time_explosion):
def parse_artis_model_setup_files(model_file_section_dict, time_explosion):
###### Reading the structure part of the ARTIS file pair
structure_fname = model_file_section_dict['structure_fname']
for i, line in enumerate(file(structure_fname)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
'cr48_fraction']
artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
dtype=[(item, np.float64) for item in artis_model_columns])
        # converting densities from log(g/cm^3) to g/cm^3 and scaling them to the current time
velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
no_of_shells, sum(masses.value))
if 'v_lowest' in model_file_section_dict:
v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in model_file_section_dict:
v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
artis_model = artis_model[min_shell:max_shell]
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
###### Reading the abundance part of the ARTIS file pair
abundances_fname = model_file_section_dict['abundances_fname']
abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
index=np.arange(1, 31))
ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
co_stable = abundances.ix[27] - artis_model['co56_fraction']
fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
mn_stable = abundances.ix[25] - 0.0
cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
v_stable = abundances.ix[23] - 0.0
ti_stable = abundances.ix[22] - 0.0
abundances.ix[28] = ni_stable
abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
-(time_explosion * inv_ni56_efolding_time).to(1).value)
abundances.ix[27] = co_stable
abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
-(time_explosion * inv_co56_efolding_time).to(1).value)
abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
(inv_ni56_efolding_time - inv_co56_efolding_time)) * \
(np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
abundances.ix[26] = fe_stable
abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
-(time_explosion * inv_fe52_efolding_time).to(1).value)
abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
- artis_model['co56_fraction'] * inv_co56_efolding_time
+ artis_model['ni56_fraction'] * inv_ni56_efolding_time
- artis_model['ni56_fraction'] * inv_co56_efolding_time
- artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
- artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
-(inv_co56_efolding_time * time_explosion).to(1).value)
+ artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
-(inv_ni56_efolding_time * time_explosion).to(1).value))
/ (inv_ni56_efolding_time - inv_co56_efolding_time))
abundances.ix[25] = mn_stable
abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
(inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
(np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
abundances.ix[24] = cr_stable
abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
-(time_explosion * inv_cr48_efolding_time).to(1).value)
abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
- artis_model['fe52_fraction'] * inv_mn52_efolding_time
- artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
-(inv_mn52_efolding_time * time_explosion).to(1).value)
+ artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
-(inv_fe52_efolding_time * time_explosion).to(1).value))
/ (inv_fe52_efolding_time - inv_mn52_efolding_time))
abundances.ix[23] = v_stable
abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
(inv_cr48_efolding_time - inv_v48_efolding_time)) * \
(np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
abundances.ix[22] = ti_stable
abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
- artis_model['cr48_fraction'] * inv_v48_efolding_time
- artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
-(inv_v48_efolding_time * time_explosion).to(1).value)
+ artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
-(inv_cr48_efolding_time * time_explosion).to(1).value))
/ (inv_cr48_efolding_time - inv_v48_efolding_time))
if 'split_shells' in model_file_section_dict:
split_shells = int(model_file_section_dict['split_shells'])
else:
split_shells = 1
if split_shells > 1:
logger.info('Increasing the number of shells by a factor of %s' % split_shells)
no_of_shells = len(v_inner)
velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
v_inner = velocities[:-1]
v_outer = velocities[1:]
old_mean_densities = mean_densities
mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
for i in xrange(split_shells):
mean_densities[i::split_shells] = old_mean_densities
new_abundance_data[:, i::split_shells] = abundances.values
abundances = pd.DataFrame(new_abundance_data, index=abundances.index)
#def parser_simple_ascii_model
return v_inner, v_outer, mean_densities, abundances
model_file_section_parser = {}
model_file_section_parser['artis'] = parse_artis_model_setup_files
try:
parser = model_file_section_parser[model_setup_file_dict['type']]
except KeyError:
        raise ConfigurationError('In model file section only types %s are allowed (supplied %s) ' %
                                 (model_file_section_parser.keys(), model_setup_file_dict['type']))
return parser(model_setup_file_dict, time_explosion)
def parse_density_file_section(density_file_dict, time_explosion):
density_file_parser = {}
def parse_artis_density(density_file_dict, time_explosion):
density_file = density_file_dict['name']
for i, line in enumerate(file(density_file)):
if i == 0:
no_of_shells = np.int64(line.strip())
elif i == 1:
time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
elif i == 2:
break
velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        # converting densities from log(g/cm^3) to g/cm^3 and scaling them to the current time
velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')
mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)
#Verifying information
if len(mean_densities) == no_of_shells:
logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
else:
raise ConfigurationError(
'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)
min_shell = 1
max_shell = no_of_shells
v_inner = velocities[:-1]
v_outer = velocities[1:]
volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
masses = (volumes * mean_densities_0 / constants.M_sun).to(1)
logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
no_of_shells, sum(masses.value))
if 'v_lowest' in density_file_dict:
v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
min_shell = v_inner.value.searchsorted(v_lowest)
else:
min_shell = 1
if 'v_highest' in density_file_dict:
v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
max_shell = v_outer.value.searchsorted(v_highest)
else:
max_shell = no_of_shells
v_inner = v_inner[min_shell:max_shell]
v_outer = v_outer[min_shell:max_shell]
mean_densities = mean_densities[min_shell:max_shell]
return v_inner, v_outer, mean_densities, min_shell, max_shell
density_file_parser['artis'] = parse_artis_density
try:
parser = density_file_parser[density_file_dict['type']]
except KeyError:
        raise ConfigurationError('In density file section only types %s are allowed (supplied %s) ' %
(density_file_parser.keys(), density_file_dict['type']))
return parser(density_file_dict, time_explosion)
def parse_density_section(density_dict, v_inner, v_outer, time_explosion):
density_parser = {}
#Parse density uniform
def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
no_of_shells = len(v_inner)
return density_dict['value'].to('g cm^-3') * np.ones(no_of_shells)
density_parser['uniform'] = parse_uniform
#Parse density branch85 w7
def parse_branch85(density_dict, v_inner, v_outer, time_explosion):
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities,
density_dict['w7_v_0'],
density_dict['w7_rho_0'], -7)
densities = calculate_density_after_time(densities,
density_dict['w7_time_0'],
time_explosion)
return densities
density_parser['branch85_w7'] = parse_branch85
def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
exponent = density_dict.pop('exponent')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_power_law_density(velocities, v_0, rho_0, exponent)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['power_law'] = parse_power_law
def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
time_0 = density_dict.pop('time_0')
rho_0 = density_dict.pop('rho_0')
v_0 = density_dict.pop('v_0')
velocities = 0.5 * (v_inner + v_outer)
densities = calculate_exponential_density(velocities, v_0, rho_0)
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities
density_parser['exponential'] = parse_exponential
try:
parser = density_parser[density_dict['type']]
except KeyError:
raise ConfigurationError('In density section only types %s are allowed (supplied %s) ' %
(density_parser.keys(), density_dict['type']))
return parser(density_dict, v_inner, v_outer, time_explosion)
def parse_abundance_file_section(abundance_file_dict, abundances, min_shell, max_shell):
abundance_file_parser = {}
def parse_artis(abundance_file_dict, abundances, min_shell, max_shell):
#### ---- debug ----
time_of_model = 0.0
####
fname = abundance_file_dict['name']
max_atom = 30
logger.info("Parsing ARTIS Abundance section from shell %d to %d", min_shell, max_shell)
abundances.values[:max_atom, :] = np.loadtxt(fname)[min_shell:max_shell, 1:].transpose()
return abundances
abundance_file_parser['artis'] = parse_artis
try:
parser = abundance_file_parser[abundance_file_dict['type']]
except KeyError:
raise ConfigurationError('In abundance file section only types %s are allowed (supplied %s) ' %
(abundance_file_parser.keys(), abundance_file_dict['type']))
return parser(abundance_file_dict, abundances, min_shell, max_shell)
def parse_supernova_section(supernova_dict):
"""
Parse the supernova section
Parameters
----------
supernova_dict: dict
YAML parsed supernova dict
Returns
-------
config_dict: dict
"""
config_dict = {}
#parse luminosity
luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()
if luminosity_unit == 'log_lsun':
config_dict['luminosity_requested'] = 10 ** (
float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
else:
config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')
config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')
if 'distance' in supernova_dict:
config_dict['distance'] = parse_quantity(supernova_dict['distance'])
else:
config_dict['distance'] = None
if 'luminosity_wavelength_start' in supernova_dict:
config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_end'] = np.inf * u.Hz
if 'luminosity_wavelength_end' in supernova_dict:
config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
to('Hz', u.spectral())
else:
config_dict['luminosity_nu_start'] = 0.0 * u.Hz
return config_dict
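# Illustrative input/output for parse_supernova_section (the values are made up):
#
#     parse_supernova_section({'luminosity_requested': '9.44 log_lsun',
#                              'time_explosion': '13 day'})
#     # -> {'luminosity_requested': 10**(9.44 + log10(L_sun)) erg/s,
#     #     'time_explosion': 13 day converted to seconds,
#     #     'distance': None,
#     #     'luminosity_nu_start': 0 Hz, 'luminosity_nu_end': inf Hz}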
def parse_spectrum_list2dict(spectrum_list):
"""
    Parse the spectrum list [start, stop, num] into a dict holding start, end, bins and the corresponding frequency grid
"""
    if spectrum_list[0].unit.physical_type != 'length' or \
            spectrum_list[1].unit.physical_type != 'length':
raise ValueError('start and end of spectrum need to be a length')
spectrum_config_dict = {}
spectrum_config_dict['start'] = spectrum_list[0]
spectrum_config_dict['end'] = spectrum_list[1]
spectrum_config_dict['bins'] = spectrum_list[2]
spectrum_frequency = np.linspace(
spectrum_config_dict['end'].to('Hz', u.spectral()),
spectrum_config_dict['start'].to('Hz', u.spectral()),
num=spectrum_config_dict['bins'] + 1)
spectrum_config_dict['frequency'] = spectrum_frequency
return spectrum_config_dict
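# Illustrative call to parse_spectrum_list2dict (values are made up):
#
#     spec = parse_spectrum_list2dict([u.Quantity(500, 'angstrom'),
#                                      u.Quantity(20000, 'angstrom'),
#                                      10000])
#     # spec['start'], spec['end'] and spec['bins'] hold the inputs, while
#     # spec['frequency'] is an ascending grid of bins + 1 frequencies spanning
#     # the two wavelength boundaries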
def parse_convergence_section(convergence_section_dict):
"""
Parse the convergence section dictionary
Parameters
----------
convergence_section_dict: ~dict
dictionary
"""
for convergence_variable in ['t_inner', 't_rad', 'w']:
if convergence_variable not in convergence_section_dict:
convergence_section_dict[convergence_variable] = {}
updated_convergence_dict = convergence_section_dict[
'global_convergence_parameters'].copy()
updated_convergence_dict.update(
convergence_section_dict[convergence_variable])
convergence_section_dict[convergence_variable] = \
updated_convergence_dict
return convergence_section_dict
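# Sketch of how parse_convergence_section merges the global parameters into the
# per-quantity sections (illustrative values only):
#
#     section = {'global_convergence_parameters': {'damping_constant': 0.5},
#                't_inner': {'damping_constant': 1.0}}
#     parse_convergence_section(section)
#     # -> 't_inner' keeps its own damping_constant (1.0), while 't_rad' and 'w'
#     #    fall back to the global value (0.5)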
def calculate_w7_branch85_densities(velocities, time_explosion, time_0=19.9999584, density_coefficient=3e29):
"""
    Generate densities from the fit to W7 in Branch 85, page 620 (citation missing)
Parameters
----------
velocities : `~numpy.ndarray`
velocities in cm/s
time_explosion : `float`
time since explosion needed to descale density with expansion
time_0 : `float`
time in seconds of the w7 model - default 19.999, no reason to change
density_coefficient : `float`
coefficient for the polynomial - obtained by fitting to W7, no reason to change
"""
densities = density_coefficient * (velocities * 1e-5) ** -7
densities = calculate_density_after_time(densities, time_0, time_explosion)
return densities[1:]
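# Illustrative use of calculate_w7_branch85_densities (values are made up); note
# that the innermost cell is dropped via densities[1:]:
#
#     velocities = np.linspace(1e9, 2e9, 21)   # cm/s
#     rho = calculate_w7_branch85_densities(velocities, time_explosion=13 * 86400)
#     # len(rho) == 20; each value follows density_coefficient * (v * 1e-5) ** -7,
#     # rescaled to time_explosion by calculate_density_after_time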
class ConfigurationNameSpace(dict):
"""
    The configuration name space class wraps a dictionary and adds
    utility functions for easy access. Accesses like a.b.c are then possible
Code from http://goo.gl/KIaq8I
Parameters
----------
config_dict: ~dict
configuration dictionary
Returns
-------
config_ns: ConfigurationNameSpace
"""
@classmethod
def from_yaml(cls, fname):
"""
Read a configuration from a YAML file
Parameters
----------
fname: str
filename or path
"""
try:
yaml_dict = yaml.load(file(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
return cls.from_config_dict(yaml_dict)
@classmethod
def from_config_dict(cls, config_dict, config_definition_file=None):
"""
Validating a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
return cls(ConfigurationValidator(config_definition,
config_dict).get_config())
marker = object()
def __init__(self, value=None):
if value is None:
pass
elif isinstance(value, dict):
for key in value:
self.__setitem__(key, value[key])
else:
raise TypeError, 'expected dict'
def __setitem__(self, key, value):
if isinstance(value, dict) and not isinstance(value,
ConfigurationNameSpace):
value = ConfigurationNameSpace(value)
if key in self and hasattr(self[key], 'unit'):
value = u.Quantity(value, self[key].unit)
dict.__setitem__(self, key, value)
def __getitem__(self, key):
return super(ConfigurationNameSpace, self).__getitem__(key)
def __getattr__(self, item):
if item in self:
return self[item]
else:
super(ConfigurationNameSpace, self).__getattribute__(item)
__setattr__ = __setitem__
def __dir__(self):
return self.keys()
def get_config_item(self, config_item_string):
"""
Get configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
config_item = config_item_path[0]
if config_item.startswith('item'):
return self[config_item_path[0]]
else:
return self[config_item]
elif len(config_item_path) == 2 and\
config_item_path[1].startswith('item'):
return self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
else:
return self[config_item_path[0]].get_config_item(
'.'.join(config_item_path[1:]))
def set_config_item(self, config_item_string, value):
"""
set configuration items using a string of type 'a.b.param'
Parameters
----------
config_item_string: ~str
string of shape 'section1.sectionb.param1'
value:
value to set the parameter with it
"""
config_item_path = config_item_string.split('.')
if len(config_item_path) == 1:
self[config_item_path[0]] = value
elif len(config_item_path) == 2 and \
config_item_path[1].startswith('item'):
current_value = self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))]
if hasattr(current_value, 'unit'):
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] =\
u.Quantity(value, current_value.unit)
else:
self[config_item_path[0]][
int(config_item_path[1].replace('item', ''))] = value
else:
self[config_item_path[0]].set_config_item(
'.'.join(config_item_path[1:]), value)
def deepcopy(self):
return ConfigurationNameSpace(copy.deepcopy(dict(self)))
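# Illustrative attribute-style access provided by ConfigurationNameSpace
# (the configuration keys below are made up):
#
#     cns = ConfigurationNameSpace({'model': {'structure': {'velocity': 11000}}})
#     cns.model.structure.velocity                       # -> 11000
#     cns.get_config_item('model.structure.velocity')    # -> 11000
#     cns.set_config_item('model.structure.velocity', 12000)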
class Configuration(ConfigurationNameSpace):
"""
Tardis configuration class
"""
@classmethod
def from_yaml(cls, fname, test_parser=False):
try:
yaml_dict = yaml.load(open(fname))
except IOError as e:
logger.critical('No config file named: %s', fname)
raise e
tardis_config_version = yaml_dict.get('tardis_config_version', None)
if tardis_config_version != 'v1.0':
raise ConfigurationError('Currently only tardis_config_version v1.0 supported')
return cls.from_config_dict(yaml_dict, test_parser=test_parser)
@classmethod
def from_config_dict(cls, config_dict, atom_data=None, test_parser=False,
config_definition_file=None, validate=True):
"""
Validating and subsequently parsing a config file.
Parameters
----------
config_dict : ~dict
dictionary of a raw unvalidated config file
atom_data: ~tardis.atomic.AtomData
atom data object. if `None` will be tried to be read from
atom data file path in the config_dict [default=None]
test_parser: ~bool
switch on to ignore a working atom_data, mainly useful for
testing this reader
config_definition_file: ~str
path to config definition file, if `None` will be set to the default
in the `data` directory that ships with TARDIS
validate: ~bool
Turn validation on or off.
Returns
-------
`tardis.config_reader.Configuration`
"""
if config_definition_file is None:
config_definition_file = default_config_definition_file
config_definition = yaml.load(open(config_definition_file))
if validate:
validated_config_dict = ConfigurationValidator(config_definition,
config_dict).get_config()
else:
validated_config_dict = config_dict
#First let's see if we can find an atom_db anywhere:
if test_parser:
atom_data = None
elif 'atom_data' in validated_config_dict.keys():
atom_data_fname = validated_config_dict['atom_data']
validated_config_dict['atom_data_fname'] = atom_data_fname
else:
raise ConfigurationError('No atom_data key found in config or command line')
if atom_data is None and not test_parser:
logger.info('Reading Atomic Data from %s', atom_data_fname)
atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
else:
atom_data = atom_data
#Parsing supernova dictionary
validated_config_dict['supernova']['luminosity_nu_start'] = \
validated_config_dict['supernova']['luminosity_wavelength_end'].to(
u.Hz, u.spectral())
try:
validated_config_dict['supernova']['luminosity_nu_end'] = \
(validated_config_dict['supernova']
['luminosity_wavelength_start'].to(u.Hz, u.spectral()))
except ZeroDivisionError:
validated_config_dict['supernova']['luminosity_nu_end'] = (
np.inf * u.Hz)
validated_config_dict['supernova']['time_explosion'] = (
validated_config_dict['supernova']['time_explosion'].cgs)
validated_config_dict['supernova']['luminosity_requested'] = (
validated_config_dict['supernova']['luminosity_requested'].cgs)
#Parsing the model section
model_section = validated_config_dict['model']
v_inner = None
v_outer = None
mean_densities = None
abundances = None
structure_section = model_section['structure']
if structure_section['type'] == 'specific':
start, stop, num = model_section['structure']['velocity']
num += 1
velocities = np.linspace(start, stop, num)
v_inner, v_outer = velocities[:-1], velocities[1:]
mean_densities = parse_density_section(
model_section['structure']['density'], v_inner, v_outer,
validated_config_dict['supernova']['time_explosion']).cgs
elif structure_section['type'] == 'file':
v_inner, v_outer, mean_densities, inner_boundary_index, \
outer_boundary_index = read_density_file(
structure_section['filename'], structure_section['filetype'],
validated_config_dict['supernova']['time_explosion'],
structure_section['v_inner_boundary'],
structure_section['v_outer_boundary'])
r_inner = validated_config_dict['supernova']['time_explosion'] * v_inner
r_outer = validated_config_dict['supernova']['time_explosion'] * v_outer
r_middle = 0.5 * (r_inner + r_outer)
structure_validated_config_dict = {}
structure_section['v_inner'] = v_inner.cgs
structure_section['v_outer'] = v_outer.cgs
structure_section['mean_densities'] = mean_densities.cgs
no_of_shells = len(v_inner)
structure_section['no_of_shells'] = no_of_shells
structure_section['r_inner'] = r_inner.cgs
structure_section['r_outer'] = r_outer.cgs
structure_section['r_middle'] = r_middle.cgs
structure_section['volumes'] = ((4. / 3) * np.pi * \
(r_outer ** 3 -
r_inner ** 3)).cgs
#### TODO the following is legacy code and should be removed
validated_config_dict['structure'] = \
validated_config_dict['model']['structure']
# ^^^^^^^^^^^^^^^^
abundances_section = model_section['abundances']
if abundances_section['type'] == 'uniform':
abundances = pd.DataFrame(columns=np.arange(no_of_shells),
index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)
for element_symbol_string in abundances_section:
if element_symbol_string == 'type': continue
z = element_symbol2atomic_number(element_symbol_string)
abundances.ix[z] = float(abundances_section[element_symbol_string])
elif abundances_section['type'] == 'file':
index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
inner_boundary_index, outer_boundary_index)
if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have the same '
                                         'number of cells as the specified density profile')
abundances = abundances.replace(np.nan, 0.0)
abundances = abundances[abundances.sum(axis=1) > 0]
norm_factor = abundances.sum(axis=0)
if np.any(np.abs(norm_factor - 1) > 1e-12):
logger.warning("Abundances have not been normalized to 1. - normalizing")
abundances /= norm_factor
validated_config_dict['abundances'] = abundances
########### DOING PLASMA SECTION ###############
plasma_section = validated_config_dict['plasma']
if plasma_section['initial_t_inner'] < 0.0 * u.K:
luminosity_requested = validated_config_dict['supernova']['luminosity_requested']
plasma_section['t_inner'] = ((luminosity_requested /
(4 * np.pi * r_inner[0] ** 2 *
constants.sigma_sb)) ** .25).to('K')
logger.info('"initial_t_inner" is not specified in the plasma '
'section - initializing to %s with given luminosity',
plasma_section['t_inner'])
else:
plasma_section['t_inner'] = plasma_section['initial_t_inner']
plasma_section['t_rads'] = np.ones(no_of_shells) * \
plasma_section['initial_t_rad']
if plasma_section['disable_electron_scattering'] is False:
logger.debug("Electron scattering switched on")
validated_config_dict['montecarlo']['sigma_thomson'] = 6.652486e-25 / (u.cm ** 2)
else:
logger.warn('Disabling electron scattering - this is not physical')
validated_config_dict['montecarlo']['sigma_thomson'] = 1e-200 / (u.cm ** 2)
##### NLTE subsection of Plasma start
nlte_validated_config_dict = {}
nlte_species = []
nlte_section = plasma_section['nlte']
nlte_species_list = nlte_section.pop('species')
for species_string in nlte_species_list:
nlte_species.append(species_string_to_tuple(species_string))
nlte_validated_config_dict['species'] = nlte_species
nlte_validated_config_dict['species_string'] = nlte_species_list
nlte_validated_config_dict.update(nlte_section)
if 'coronal_approximation' not in nlte_section:
logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['coronal_approximation'] = False
if 'classical_nebular' not in nlte_section:
logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
nlte_validated_config_dict['classical_nebular'] = False
elif nlte_section: #checks that the dictionary is not empty
logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
pp.pformat(nlte_section))
if not nlte_validated_config_dict:
nlte_validated_config_dict['species'] = []
plasma_section['nlte'] = nlte_validated_config_dict
#^^^^^^^^^^^^^^ End of Plasma Section
##### Monte Carlo Section
montecarlo_section = validated_config_dict['montecarlo']
if montecarlo_section['last_no_of_packets'] < 0:
montecarlo_section['last_no_of_packets'] = \
montecarlo_section['no_of_packets']
default_convergence_section = {'type': 'damped',
'lock_t_inner_cyles': 1,
't_inner_update_exponent': -0.5,
'global_convergence_parameters' : {
'damping_constant': 0.5}}
if montecarlo_section['convergence_strategy'] is None:
logger.warning('No convergence criteria selected - just damping by 0.5 for w, t_rad and t_inner')
montecarlo_section['convergence_strategy'] = default_convergence_section
else:
montecarlo_section['convergence_strategy'] = parse_convergence_section(
montecarlo_section['convergence_strategy'])
black_body_section = montecarlo_section['black_body_sampling']
montecarlo_section['black_body_sampling'] = {}
montecarlo_section['black_body_sampling']['start'] = \
black_body_section[0]
montecarlo_section['black_body_sampling']['end'] = \
black_body_section[1]
montecarlo_section['black_body_sampling']['samples'] = \
black_body_section[2]
###### END of convergence section reading
validated_config_dict['spectrum'] = parse_spectrum_list2dict(
validated_config_dict['spectrum'])
return cls(validated_config_dict, atom_data)
def __init__(self, config_dict, atom_data):
super(Configuration, self).__init__(config_dict)
self.atom_data = atom_data
selected_atomic_numbers = self.abundances.index
if atom_data is not None:
self.number_densities = (self.abundances * self.structure.mean_densities.to('g/cm^3').value)
self.number_densities = self.number_densities.div(self.atom_data.atom_data.mass.ix[selected_atomic_numbers],
axis=0)
else:
logger.critical('atom_data is None, only sensible for testing the parser')
| bsd-3-clause |
glenioborges/ibis | ibis/expr/types.py | 5 | 26592 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import six
from ibis.common import IbisError, RelationError
import ibis.common as com
import ibis.compat as compat
import ibis.config as config
import ibis.util as util
class Parameter(object):
"""
Placeholder, to be implemented
"""
pass
# ---------------------------------------------------------------------
class Expr(object):
"""
"""
def __init__(self, arg):
# TODO: all inputs must inherit from a common table API
self._arg = arg
def __repr__(self):
if config.options.interactive:
try:
result = self.execute()
return repr(result)
except com.TranslationError as e:
output = ('Translation to backend failed\n'
'Error message: {0}\n'
'Expression repr follows:\n{1}'
.format(e.args[0], self._repr()))
return output
else:
return self._repr()
def _repr(self, memo=None):
from ibis.expr.format import ExprFormatter
return ExprFormatter(self).get_result()
def pipe(self, f, *args, **kwargs):
"""
Generic composition function to enable expression pipelining
>>> (expr
.pipe(f, *args, **kwargs)
.pipe(g, *args2, **kwargs2))
is equivalent to
>>> g(f(expr, *args, **kwargs), *args2, **kwargs2)
Parameters
----------
f : function or (function, arg_name) tuple
If the expression needs to be passed as anything other than the first
argument to the function, pass a tuple with the argument name. For
example, (f, 'data') if the function f expects a 'data' keyword
args : positional arguments
kwargs : keyword arguments
Examples
--------
>>> def foo(data, a=None, b=None):
pass
>>> def bar(a, b, data=None):
pass
>>> expr.pipe(foo, a=5, b=10)
>>> expr.pipe((bar, 'data'), 1, 2)
Returns
-------
result : result type of passed function
"""
if isinstance(f, tuple):
f, data_keyword = f
kwargs = kwargs.copy()
kwargs[data_keyword] = self
return f(*args, **kwargs)
else:
return f(self, *args, **kwargs)
__call__ = pipe
def op(self):
return self._arg
@property
def _factory(self):
def factory(arg, name=None):
return type(self)(arg, name=name)
return factory
def _can_implicit_cast(self, arg):
return False
def execute(self, limit=None, async=False):
"""
If this expression is based on physical tables in a database backend,
execute it against that backend.
Returns
-------
result : expression-dependent
Result of compiling expression and executing in backend
"""
from ibis.client import execute
return execute(self, limit=limit, async=async)
def compile(self, limit=None):
"""
Compile expression to whatever execution target, to verify
Returns
-------
compiled : value or list
query representation or list thereof
"""
from ibis.client import compile
return compile(self, limit=limit)
def verify(self):
"""
Returns True if expression can be compiled to its attached client
"""
try:
self.compile()
return True
except:
return False
def equals(self, other):
if type(self) != type(other):
return False
return self._arg.equals(other._arg)
def _can_compare(self, other):
return False
def _root_tables(self):
return self.op().root_tables()
def _get_unbound_tables(self):
# The expression graph may contain one or more tables of a particular
# known schema
pass
def _safe_repr(x, memo=None):
return x._repr(memo=memo) if isinstance(x, (Expr, Node)) else repr(x)
class Node(object):
"""
Node is the base class for all relational algebra and analytical
functionality. It transforms the input expressions into an output
expression.
Each node implementation is responsible for validating the inputs,
including any type promotion and / or casting issues, and producing a
well-typed expression
Note that Node is deliberately not made an expression subclass: think
of Node as merely a typed expression builder.
"""
def __init__(self, args):
self.args = args
def __repr__(self):
return self._repr()
def _repr(self, memo=None):
# Quick and dirty to get us started
opname = type(self).__name__
pprint_args = []
memo = memo or {}
if id(self) in memo:
return memo[id(self)]
def _pp(x):
if isinstance(x, Expr):
key = id(x.op())
else:
key = id(x)
if key in memo:
return memo[key]
result = _safe_repr(x, memo=memo)
memo[key] = result
return result
for x in self.args:
if isinstance(x, (tuple, list)):
pp = repr([_pp(y) for y in x])
else:
pp = _pp(x)
pprint_args.append(pp)
return '%s(%s)' % (opname, ', '.join(pprint_args))
def flat_args(self):
for arg in self.args:
if isinstance(arg, (tuple, list)):
for x in arg:
yield x
else:
yield arg
def equals(self, other):
if type(self) != type(other):
return False
if len(self.args) != len(other.args):
return False
for left, right in zip(self.args, other.args):
if not all_equal(left, right):
return False
return True
def is_ancestor(self, other):
if isinstance(other, Expr):
other = other.op()
return self.equals(other)
def to_expr(self):
klass = self.output_type()
return klass(self)
def output_type(self):
"""
This function must resolve the output type of the expression and return
the node wrapped in the appropriate ValueExpr type.
"""
raise NotImplementedError
def all_equal(left, right):
if isinstance(left, list):
if not isinstance(right, list):
return False
for a, b in zip(left, right):
if not all_equal(a, b):
return False
return True
if hasattr(left, 'equals'):
return left.equals(right)
else:
return left == right
return True
class ValueNode(Node):
def __init__(self, *args):
args = self._validate_args(args)
Node.__init__(self, args)
def _validate_args(self, args):
if not hasattr(self, 'input_type'):
return args
return self.input_type.validate(args)
def root_tables(self):
exprs = [arg for arg in self.args if isinstance(arg, Expr)]
return distinct_roots(*exprs)
def resolve_name(self):
raise com.ExpressionError('Expression is not named: %s' % repr(self))
class TableColumn(ValueNode):
"""
Selects a column from a TableExpr
"""
def __init__(self, name, table_expr):
Node.__init__(self, [name, table_expr])
if name not in table_expr.schema():
raise KeyError("'{0}' is not a field".format(name))
self.name = name
self.table = table_expr
def parent(self):
return self.table
def resolve_name(self):
return self.name
def root_tables(self):
return self.table._root_tables()
def to_expr(self):
ctype = self.table._get_type(self.name)
klass = ctype.array_type()
return klass(self, name=self.name)
class ExpressionList(Node):
def __init__(self, exprs):
exprs = [as_value_expr(x) for x in exprs]
Node.__init__(self, exprs)
def root_tables(self):
return distinct_roots(*self.args)
def output_type(self):
return ExprList
class ExprList(Expr):
def exprs(self):
return self.op().args
def names(self):
return [x.get_name() for x in self.exprs()]
def rename(self, f):
new_exprs = [x.name(f(x.get_name())) for x in self.exprs()]
return ExpressionList(new_exprs).to_expr()
def prefix(self, value):
return self.rename(lambda x: value + x)
def suffix(self, value):
return self.rename(lambda x: x + value)
def concat(self, *others):
"""
Concatenate expression lists
Returns
-------
combined : ExprList
"""
exprs = list(self.exprs())
for o in others:
if not isinstance(o, ExprList):
raise TypeError(o)
exprs.extend(o.exprs())
return ExpressionList(exprs).to_expr()
class Literal(ValueNode):
def __init__(self, value):
self.value = value
def __repr__(self):
return 'Literal(%s)' % repr(self.value)
@property
def args(self):
return [self.value]
def equals(self, other):
if not isinstance(other, Literal):
return False
return (isinstance(other.value, type(self.value)) and
self.value == other.value)
def output_type(self):
import ibis.expr.rules as rules
if isinstance(self.value, bool):
klass = BooleanScalar
elif isinstance(self.value, compat.integer_types):
int_type = rules.int_literal_class(self.value)
klass = int_type.scalar_type()
elif isinstance(self.value, float):
klass = DoubleScalar
elif isinstance(self.value, six.string_types):
klass = StringScalar
elif isinstance(self.value, datetime.datetime):
klass = TimestampScalar
else:
raise com.InputTypeError(self.value)
return klass
def root_tables(self):
return []
class TableNode(Node):
def get_type(self, name):
return self.get_schema().get_type(name)
def to_expr(self):
return TableExpr(self)
class BlockingTableNode(TableNode):
# Try to represent the fact that whatever lies here is a semantically
# distinct table. Like projections, aggregations, and so forth
pass
def distinct_roots(*args):
all_roots = []
for arg in args:
all_roots.extend(arg._root_tables())
return util.unique_by_key(all_roots, id)
# ---------------------------------------------------------------------
# Helper / factory functions
class ValueExpr(Expr):
"""
Base class for a data generating expression having a fixed and known type,
    either a single value (scalar) or an array of values (column)
"""
_implicit_casts = set()
def __init__(self, arg, name=None):
Expr.__init__(self, arg)
self._name = name
def equals(self, other):
if not isinstance(other, ValueExpr):
return False
if self._name != other._name:
return False
return Expr.equals(self, other)
def type(self):
import ibis.expr.datatypes as dt
return dt._primitive_types[self._typename]
def _base_type(self):
# Parametric types like "decimal"
return self.type()
def _can_cast_implicit(self, typename):
from ibis.expr.rules import ImplicitCast
rule = ImplicitCast(self.type(), self._implicit_casts)
return rule.can_cast(typename)
def get_name(self):
if self._name is not None:
# This value has been explicitly named
return self._name
# In some but not all cases we can get a name from the node that
# produces the value
return self.op().resolve_name()
def name(self, name):
return self._factory(self._arg, name=name)
class ScalarExpr(ValueExpr):
pass
class ArrayExpr(ValueExpr):
def parent(self):
return self._arg
def to_projection(self):
"""
Promote this column expression to a table projection
"""
roots = self._root_tables()
if len(roots) > 1:
raise RelationError('Cannot convert array expression involving '
'multiple base table references to a '
'projection')
table = TableExpr(roots[0])
return table.projection([self])
class AnalyticExpr(Expr):
@property
def _factory(self):
def factory(arg):
return type(self)(arg)
return factory
def type(self):
return 'analytic'
class TableExpr(Expr):
@property
def _factory(self):
def factory(arg):
return TableExpr(arg)
return factory
def _assert_valid(self, exprs):
from ibis.expr.analysis import ExprValidator
ExprValidator([self]).validate_all(exprs)
def __contains__(self, name):
return name in self.schema()
def __getitem__(self, what):
if isinstance(what, six.string_types):
return self.get_column(what)
if isinstance(what, slice):
step = what.step
if step is not None and step != 1:
raise ValueError('Slice step can only be 1')
start = what.start or 0
stop = what.stop
if stop is None or stop < 0:
raise ValueError('End index must be a positive number')
if start < 0:
raise ValueError('Start index must be a positive number')
return self.limit(stop - start, offset=start)
what = bind_expr(self, what)
if isinstance(what, AnalyticExpr):
what = what._table_getitem()
if isinstance(what, (list, tuple, TableExpr)):
# Projection case
return self.projection(what)
elif isinstance(what, BooleanArray):
# Boolean predicate
return self.filter([what])
else:
raise NotImplementedError
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
if not self._is_materialized() or key not in self.schema():
raise
return self.get_column(key)
def __dir__(self):
attrs = dir(type(self))
if self._is_materialized():
attrs = list(sorted(set(attrs + self.schema().names)))
return attrs
def _resolve(self, exprs):
exprs = util.promote_list(exprs)
# Stash this helper method here for now
out_exprs = []
for expr in exprs:
expr = self._ensure_expr(expr)
out_exprs.append(expr)
return out_exprs
def _ensure_expr(self, expr):
if isinstance(expr, six.string_types):
return self[expr]
elif not isinstance(expr, Expr):
return expr(self)
else:
return expr
def _get_type(self, name):
return self._arg.get_type(name)
def get_columns(self, iterable):
"""
Get multiple columns from the table
Examples
--------
a, b, c = table.get_columns(['a', 'b', 'c'])
Returns
-------
columns : list of column/array expressions
"""
return [self.get_column(x) for x in iterable]
def get_column(self, name):
"""
Get a reference to a single column from the table
Returns
-------
column : array expression
"""
ref = TableColumn(name, self)
return ref.to_expr()
@property
def columns(self):
return self.schema().names
def schema(self):
"""
Get the schema for this table (if one is known)
Returns
-------
schema : Schema
"""
if not self._is_materialized():
raise IbisError('Table operation is not yet materialized')
return self.op().get_schema()
def _is_materialized(self):
# The operation produces a known schema
return self.op().has_schema()
def add_column(self, expr, name=None):
"""
Add indicated column expression to table, producing a new table. Note:
this is a shortcut for performing a projection having the same effect.
Returns
-------
modified_table : TableExpr
"""
expr = self._ensure_expr(expr)
if not isinstance(expr, ArrayExpr):
raise com.InputTypeError('Must pass array expression')
if name is not None:
expr = expr.name(name)
return self.projection([self, expr])
def group_by(self, by):
"""
Create an intermediate grouped table expression, pending some group
operation to be applied with it.
Examples
--------
x.group_by([b1, b2]).aggregate(metrics)
Returns
-------
grouped_expr : GroupedTableExpr
"""
from ibis.expr.groupby import GroupedTableExpr
return GroupedTableExpr(self, by)
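# A short, illustrative sketch of the TableExpr indexing behaviour implemented
# above (column and table names are made up; attribute access needs a
# materialized schema):
#
#     t['a'] or t.a    -> column expression for column 'a'
#     t[['a', 'b']]    -> projection containing columns 'a' and 'b'
#     t[t.a > 0]       -> filtered table (boolean predicate)
#     t[0:100]         -> t.limit(100, offset=0)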
# -----------------------------------------------------------------------------
# Declare all typed ValueExprs. This is what the user will actually interact
# with: an instance of each is well-typed and includes all valid methods
# defined for each type.
class AnyValue(ValueExpr):
_typename = 'any'
class NullValue(AnyValue):
_typename = 'null'
def _can_cast_implicit(self, typename):
return True
class NumericValue(AnyValue):
def _can_compare(self, other):
return isinstance(other, NumericValue)
class IntegerValue(NumericValue):
pass
class BooleanValue(NumericValue):
_typename = 'boolean'
class Int8Value(IntegerValue):
_typename = 'int8'
_implicit_casts = set(['int16', 'int32', 'int64', 'float', 'double',
'decimal'])
class Int16Value(IntegerValue):
_typename = 'int16'
_implicit_casts = set(['int32', 'int64', 'float', 'double', 'decimal'])
class Int32Value(IntegerValue):
_typename = 'int32'
_implicit_casts = set(['int64', 'float', 'double', 'decimal'])
class Int64Value(IntegerValue):
_typename = 'int64'
_implicit_casts = set(['float', 'double', 'decimal'])
class FloatingValue(NumericValue):
pass
class FloatValue(FloatingValue):
_typename = 'float'
_implicit_casts = set(['double', 'decimal'])
class DoubleValue(FloatingValue):
_typename = 'double'
_implicit_casts = set(['decimal'])
class StringValue(AnyValue):
_typename = 'string'
def _can_compare(self, other):
return isinstance(other, StringValue)
class DecimalValue(NumericValue):
_typename = 'decimal'
_implicit_casts = set(['float', 'double'])
def __init__(self, meta):
self.meta = meta
self._precision = meta.precision
self._scale = meta.scale
def type(self):
from ibis.expr.datatypes import Decimal
return Decimal(self._precision, self._scale)
def _base_type(self):
return 'decimal'
@classmethod
def _make_constructor(cls, meta):
def constructor(arg, name=None):
return cls(arg, meta, name=name)
return constructor
class TimestampValue(AnyValue):
_typename = 'timestamp'
def _can_implicit_cast(self, arg):
op = arg.op()
if isinstance(op, Literal):
try:
import pandas as pd
pd.Timestamp(op.value)
return True
except ValueError:
return False
return False
def _can_compare(self, other):
return isinstance(other, TimestampValue)
def _implicit_cast(self, arg):
# assume we've checked this is OK at this point...
op = arg.op()
return TimestampScalar(op)
class NumericArray(ArrayExpr, NumericValue):
pass
class NullScalar(NullValue, ScalarExpr):
"""
A scalar value expression representing NULL
"""
pass
class BooleanScalar(ScalarExpr, BooleanValue):
pass
class BooleanArray(NumericArray, BooleanValue):
pass
class Int8Scalar(ScalarExpr, Int8Value):
pass
class Int8Array(NumericArray, Int8Value):
pass
class Int16Scalar(ScalarExpr, Int16Value):
pass
class Int16Array(NumericArray, Int16Value):
pass
class Int32Scalar(ScalarExpr, Int32Value):
pass
class Int32Array(NumericArray, Int32Value):
pass
class Int64Scalar(ScalarExpr, Int64Value):
pass
class Int64Array(NumericArray, Int64Value):
pass
class FloatScalar(ScalarExpr, FloatValue):
pass
class FloatArray(NumericArray, FloatValue):
pass
class DoubleScalar(ScalarExpr, DoubleValue):
pass
class DoubleArray(NumericArray, DoubleValue):
pass
class StringScalar(ScalarExpr, StringValue):
pass
class StringArray(ArrayExpr, StringValue):
pass
class TimestampScalar(ScalarExpr, TimestampValue):
pass
class TimestampArray(ArrayExpr, TimestampValue):
pass
class DecimalScalar(DecimalValue, ScalarExpr):
def __init__(self, arg, meta, name=None):
DecimalValue.__init__(self, meta)
ScalarExpr.__init__(self, arg, name=name)
@property
def _factory(self):
def factory(arg, name=None):
return DecimalScalar(arg, self.meta, name=name)
return factory
class DecimalArray(DecimalValue, NumericArray):
def __init__(self, arg, meta, name=None):
DecimalValue.__init__(self, meta)
ArrayExpr.__init__(self, arg, name=name)
@property
def _factory(self):
def factory(arg, name=None):
return DecimalArray(arg, self.meta, name=name)
return factory
class CategoryValue(AnyValue):
"""
Represents some ordered data categorization; tracked as an int32 value
until explicitly
"""
_typename = 'category'
_implicit_casts = Int16Value._implicit_casts
def __init__(self, meta):
self.meta = meta
def type(self):
return self.meta
def _base_type(self):
return 'category'
def _can_compare(self, other):
return isinstance(other, IntegerValue)
class CategoryScalar(CategoryValue, ScalarExpr):
def __init__(self, arg, meta, name=None):
CategoryValue.__init__(self, meta)
ScalarExpr.__init__(self, arg, name=name)
@property
def _factory(self):
def factory(arg, name=None):
return CategoryScalar(arg, self.meta, name=name)
return factory
class CategoryArray(CategoryValue, ArrayExpr):
def __init__(self, arg, meta, name=None):
CategoryValue.__init__(self, meta)
ArrayExpr.__init__(self, arg, name=name)
@property
def _factory(self):
def factory(arg, name=None):
return CategoryArray(arg, self.meta, name=name)
return factory
class UnnamedMarker(object):
pass
unnamed = UnnamedMarker()
def as_value_expr(val):
import pandas as pd
if not isinstance(val, Expr):
if isinstance(val, (tuple, list)):
val = sequence(val)
elif isinstance(val, pd.Series):
val = sequence(list(val))
else:
val = literal(val)
return val
def literal(value):
"""
Create a scalar expression from a Python value
Parameters
----------
value : some Python basic type
Returns
-------
lit_value : value expression, type depending on input value
"""
if value is None or value is null:
return null()
else:
return Literal(value).to_expr()
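# Illustrative literal() results; the exact integer width is chosen by
# rules.int_literal_class, so the integer case below is an assumption:
#
#     literal(5)       # integer scalar (smallest fitting type, e.g. Int8Scalar)
#     literal(2.5)     # DoubleScalar
#     literal('a')     # StringScalar
#     literal(None)    # same as null()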
_NULL = None
def null():
"""
Create a NULL/NA scalar
"""
global _NULL
if _NULL is None:
_NULL = NullScalar(NullLiteral())
return _NULL
def sequence(values):
"""
Wrap a list of Python values as an Ibis sequence type
Parameters
----------
values : list
Should all be None or the same type
Returns
-------
    seq : ListExpr
"""
return ValueList(values).to_expr()
class NullLiteral(ValueNode):
"""
Typeless NULL literal
"""
def __init__(self):
pass
@property
def args(self):
return [None]
def equals(self, other):
return isinstance(other, NullLiteral)
def output_type(self):
return NullScalar
def root_tables(self):
return []
class ListExpr(ArrayExpr, AnyValue):
pass
class SortExpr(Expr):
pass
class ValueList(ValueNode):
"""
Data structure for a list of value expressions
"""
def __init__(self, args):
self.values = [as_value_expr(x) for x in args]
ValueNode.__init__(self, self.values)
def root_tables(self):
return distinct_roots(*self.values)
def to_expr(self):
return ListExpr(self)
def bind_expr(table, expr):
if isinstance(expr, (list, tuple)):
return [bind_expr(table, x) for x in expr]
return table._ensure_expr(expr)
def find_base_table(expr):
if isinstance(expr, TableExpr):
return expr
for arg in expr.op().flat_args():
if isinstance(arg, Expr):
r = find_base_table(arg)
if isinstance(r, TableExpr):
return r
def find_all_base_tables(expr, memo=None):
if memo is None:
memo = {}
node = expr.op()
if (isinstance(expr, TableExpr) and
isinstance(node, BlockingTableNode)):
if id(expr) not in memo:
memo[id(expr)] = expr
return memo
for arg in expr.op().flat_args():
if isinstance(arg, Expr):
find_all_base_tables(arg, memo)
return memo
| apache-2.0 |
Nyker510/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
mwaskom/moss | moss/tests/test_statistical.py | 1 | 18508 | import numpy as np
import scipy as sp
from scipy import stats as spstats
import pandas as pd
from six.moves import range
from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy.testing as npt
import nose.tools
import nose.tools as nt
from nose.tools import assert_equal, assert_almost_equal, raises
import pandas.util.testing as pdt
from .. import statistical as stat
rs = np.random.RandomState(sum(map(ord, "moss_stats")))
a_norm = rs.randn(100)
a_range = np.arange(101)
datasets = [dict(X=spstats.norm(0, 1).rvs((24, 12)),
y=spstats.bernoulli(.5).rvs(24),
runs=np.repeat([0, 1], 12)) for i in range(3)]
datasets_3d = [dict(X=spstats.norm(0, 1).rvs((4, 24, 12)),
y=spstats.bernoulli(.5).rvs(24),
runs=np.repeat([0, 1], 12)) for i in range(3)]
def test_bootstrap():
"""Test that bootstrapping gives the right answer in dumb cases."""
a_ones = np.ones(10)
n_boot = 5
out1 = stat.bootstrap(a_ones, n_boot=n_boot)
assert_array_equal(out1, np.ones(n_boot))
out2 = stat.bootstrap(a_ones, n_boot=n_boot, func=np.median)
assert_array_equal(out2, np.ones(n_boot))
def test_bootstrap_length():
"""Test that we get a bootstrap array of the right shape."""
out = stat.bootstrap(a_norm)
assert_equal(len(out), 10000)
n_boot = 100
out = stat.bootstrap(a_norm, n_boot=n_boot)
assert_equal(len(out), n_boot)
def test_bootstrap_range():
"""Test that boostrapping a random array stays within the right range."""
min, max = a_norm.min(), a_norm.max()
out = stat.bootstrap(a_norm)
nose.tools.assert_less(min, out.min())
nose.tools.assert_greater_equal(max, out.max())
def test_bootstrap_multiarg():
"""Test that bootstrap works with multiple input arrays."""
x = np.vstack([[1, 10] for i in range(10)])
y = np.vstack([[5, 5] for i in range(10)])
def test_func(x, y):
return np.vstack((x, y)).max(axis=0)
out_actual = stat.bootstrap(x, y, n_boot=2, func=test_func)
out_wanted = np.array([[5, 10], [5, 10]])
assert_array_equal(out_actual, out_wanted)
def test_bootstrap_axis():
"""Test axis kwarg to bootstrap function."""
x = rs.randn(10, 20)
n_boot = 100
out_default = stat.bootstrap(x, n_boot=n_boot)
assert_equal(out_default.shape, (n_boot,))
out_axis = stat.bootstrap(x, n_boot=n_boot, axis=0)
assert_equal(out_axis.shape, (n_boot, 20))
def test_bootstrap_random_seed():
"""Test that we can get reproducible resamples by seeding the RNG."""
data = rs.randn(50)
seed = 42
boots1 = stat.bootstrap(data, random_seed=seed)
boots2 = stat.bootstrap(data, random_seed=seed)
assert_array_equal(boots1, boots2)
def test_smooth_bootstrap():
"""Test smooth bootstrap."""
x = rs.randn(15)
n_boot = 100
out_normal = stat.bootstrap(x, n_boot=n_boot, func=np.median)
out_smooth = stat.bootstrap(x, n_boot=n_boot,
smooth=True, func=np.median)
assert(np.median(out_normal) in x)
assert(not np.median(out_smooth) in x)
def test_bootstrap_ols():
"""Test bootstrap of OLS model fit."""
def ols_fit(X, y):
return np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
X = np.column_stack((rs.randn(50, 4), np.ones(50)))
w = [2, 4, 0, 3, 5]
y_noisy = np.dot(X, w) + rs.randn(50) * 20
y_lownoise = np.dot(X, w) + rs.randn(50)
n_boot = 500
w_boot_noisy = stat.bootstrap(X, y_noisy,
n_boot=n_boot,
func=ols_fit)
w_boot_lownoise = stat.bootstrap(X, y_lownoise,
n_boot=n_boot,
func=ols_fit)
assert_equal(w_boot_noisy.shape, (n_boot, 5))
assert_equal(w_boot_lownoise.shape, (n_boot, 5))
nose.tools.assert_greater(w_boot_noisy.std(),
w_boot_lownoise.std())
def test_bootstrap_units():
"""Test that results make sense when passing unit IDs to bootstrap."""
data = rs.randn(50)
ids = np.repeat(range(10), 5)
bwerr = rs.normal(0, 2, 10)
bwerr = bwerr[ids]
data_rm = data + bwerr
seed = 77
boots_orig = stat.bootstrap(data_rm, random_seed=seed)
boots_rm = stat.bootstrap(data_rm, units=ids, random_seed=seed)
nose.tools.assert_greater(boots_rm.std(), boots_orig.std())
@raises(ValueError)
def test_bootstrap_arglength():
"""Test that different length args raise ValueError."""
stat.bootstrap(np.arange(5), np.arange(10))
@raises(TypeError)
def test_bootstrap_noncallable():
"""Test that we get a TypeError with noncallable statfunc."""
non_func = "mean"
stat.bootstrap(a_norm, 100, non_func)
def test_percentiles():
"""Test function to return sequence of percentiles."""
single_val = 5
single = stat.percentiles(a_range, single_val)
assert_equal(single, single_val)
multi_val = [10, 20]
multi = stat.percentiles(a_range, multi_val)
assert_array_equal(multi, multi_val)
array_val = rs.randint(0, 101, 5).astype(float)
array = stat.percentiles(a_range, array_val)
assert_array_almost_equal(array, array_val)
def test_percentiles_acc():
"""Test accuracy of calculation."""
# First a basic case
data = np.array([10, 20, 30])
val = 20
perc = stat.percentiles(data, 50)
assert_equal(perc, val)
# Now test against scoreatpercentile
percentiles = rs.randint(0, 101, 10)
out = stat.percentiles(a_norm, percentiles)
for score, pct in zip(out, percentiles):
assert_equal(score, sp.stats.scoreatpercentile(a_norm, pct))
def test_percentiles_axis():
"""Test use of axis argument to percentils."""
data = rs.randn(10, 10)
# Test against the median with 50th percentile
median1 = np.median(data)
out1 = stat.percentiles(data, 50)
assert_array_almost_equal(median1, out1)
for axis in range(2):
median2 = np.median(data, axis=axis)
out2 = stat.percentiles(data, 50, axis=axis)
assert_array_almost_equal(median2, out2)
median3 = np.median(data, axis=0)
out3 = stat.percentiles(data, [50, 95], axis=0)
assert_array_almost_equal(median3, out3[0])
assert_equal(2, len(out3))
def test_ci():
"""Test ci against percentiles."""
a = rs.randn(100)
p = stat.percentiles(a, [2.5, 97.5])
c = stat.ci(a, 95)
assert_array_equal(p, c)
def test_vector_reject():
"""Test vector rejection function."""
x = rs.randn(30)
y = x + rs.randn(30) / 2
x_ = stat.vector_reject(x, y)
assert_almost_equal(np.dot(x_, y), 0)
def test_add_constant():
"""Test the add_constant function."""
a = rs.randn(10, 5)
wanted = np.column_stack((a, np.ones(10)))
got = stat.add_constant(a)
assert_array_equal(wanted, got)
def test_randomize_onesample():
"""Test performance of randomize_onesample."""
a_zero = rs.normal(0, 1, 50)
t_zero, p_zero = stat.randomize_onesample(a_zero)
nose.tools.assert_greater(p_zero, 0.05)
a_five = rs.normal(5, 1, 50)
t_five, p_five = stat.randomize_onesample(a_five)
nose.tools.assert_greater(0.05, p_five)
t_scipy, p_scipy = sp.stats.ttest_1samp(a_five, 0)
nose.tools.assert_almost_equal(t_scipy, t_five)
def test_randomize_onesample_range():
"""Make sure that output is bounded between 0 and 1."""
for i in range(100):
a = rs.normal(rs.randint(-10, 10),
rs.uniform(.5, 3), 100)
t, p = stat.randomize_onesample(a, 100)
nose.tools.assert_greater_equal(1, p)
nose.tools.assert_greater_equal(p, 0)
def test_randomize_onesample_getdist():
"""Test that we can get the null distribution if we ask for it."""
a = rs.normal(0, 1, 20)
out = stat.randomize_onesample(a, return_dist=True)
assert_equal(len(out), 3)
def test_randomize_onesample_iters():
"""Make sure we get the right number of samples."""
a = rs.normal(0, 1, 20)
t, p, samples = stat.randomize_onesample(a, return_dist=True)
assert_equal(len(samples), 10000)
for n in rs.randint(5, 1e4, 5):
t, p, samples = stat.randomize_onesample(a, n, return_dist=True)
assert_equal(len(samples), n)
def test_randomize_onesample_seed():
"""Test that we can seed the random state and get the same distribution."""
a = rs.normal(0, 1, 20)
seed = 42
t_a, p_a, samples_a = stat.randomize_onesample(a, 1000,
random_seed=seed,
return_dist=True)
    t_b, p_b, samples_b = stat.randomize_onesample(a, 1000,
random_seed=seed,
return_dist=True)
assert_array_equal(samples_a, samples_b)
def test_randomize_onesample_multitest():
"""Test that randomizing over multiple tests works."""
a = rs.normal(0, 1, (20, 5))
t, p = stat.randomize_onesample(a, 1000)
assert_equal(len(t), 5)
assert_equal(len(p), 5)
t, p, dist = stat.randomize_onesample(a, 1000, return_dist=True)
assert_equal(dist.shape, (5, 1000))
def test_randomize_onesample_correction():
"""Test that maximum based correction (seems to) work."""
a = rs.normal(0, 1, (100, 10))
t_un, p_un = stat.randomize_onesample(a, 1000, corrected=False)
t_corr, p_corr = stat.randomize_onesample(a, 1000, corrected=True)
assert_array_equal(t_un, t_corr)
npt.assert_array_less(p_un, p_corr)
def test_randomize_onesample_h0():
"""Test that we can supply a null hypothesis for the group mean."""
a = rs.normal(4, 1, 100)
t, p = stat.randomize_onesample(a, 1000, h_0=0)
assert p < 0.01
t, p = stat.randomize_onesample(a, 1000, h_0=4)
assert p > 0.01
def test_randomize_onesample_scalar():
"""Single values returned from randomize_onesample should be scalars."""
a = rs.randn(40)
t, p = stat.randomize_onesample(a)
assert np.isscalar(t)
assert np.isscalar(p)
a = rs.randn(40, 3)
t, p = stat.randomize_onesample(a)
assert not np.isscalar(t)
assert not np.isscalar(p)
def test_randomize_corrmat():
"""Test the correctness of the correlation matrix p values."""
a = rs.randn(30)
b = a + rs.rand(30) * 3
c = rs.randn(30)
d = [a, b, c]
p_mat, dist = stat.randomize_corrmat(d, tail="upper", corrected=False,
return_dist=True)
nose.tools.assert_greater(p_mat[2, 0], p_mat[1, 0])
corrmat = np.corrcoef(d)
pctile = 100 - spstats.percentileofscore(dist[2, 1], corrmat[2, 1])
nose.tools.assert_almost_equal(p_mat[2, 1] * 100, pctile)
d[1] = -a + rs.rand(30)
p_mat = stat.randomize_corrmat(d)
nose.tools.assert_greater(0.05, p_mat[1, 0])
def test_randomize_corrmat_dist():
"""Test that the distribution looks right."""
a = rs.randn(3, 20)
for n_i in [5, 10]:
p_mat, dist = stat.randomize_corrmat(a, n_iter=n_i, return_dist=True)
assert_equal(n_i, dist.shape[-1])
p_mat, dist = stat.randomize_corrmat(a, n_iter=10000, return_dist=True)
diag_mean = dist[0, 0].mean()
assert_equal(diag_mean, 1)
off_diag_mean = dist[0, 1].mean()
nose.tools.assert_greater(0.05, off_diag_mean)
def test_randomize_corrmat_correction():
"""Test that FWE correction works."""
a = rs.randn(3, 20)
p_mat = stat.randomize_corrmat(a, "upper", False)
p_mat_corr = stat.randomize_corrmat(a, "upper", True)
triu = np.triu_indices(3, 1)
npt.assert_array_less(p_mat[triu], p_mat_corr[triu])
def test_randomize_corrmat_tails():
"""Test that the tail argument works."""
a = rs.randn(30)
b = a + rs.rand(30) * 8
c = rs.randn(30)
d = [a, b, c]
p_mat_b = stat.randomize_corrmat(d, "both", False, random_seed=0)
p_mat_u = stat.randomize_corrmat(d, "upper", False, random_seed=0)
p_mat_l = stat.randomize_corrmat(d, "lower", False, random_seed=0)
assert_equal(p_mat_b[0, 1], p_mat_u[0, 1] * 2)
assert_equal(p_mat_l[0, 1], 1 - p_mat_u[0, 1])
def test_randomise_corrmat_seed():
"""Test that we can seed the corrmat randomization."""
a = rs.randn(3, 20)
_, dist1 = stat.randomize_corrmat(a, random_seed=0, return_dist=True)
_, dist2 = stat.randomize_corrmat(a, random_seed=0, return_dist=True)
assert_array_equal(dist1, dist2)
@raises(ValueError)
def test_randomize_corrmat_tail_error():
"""Test that we are strict about tail paramete."""
a = rs.randn(3, 30)
stat.randomize_corrmat(a, "hello")
# def test_randomize_classifier():
# """Test basic functions of randomize_classifier."""
# data = dict(X=spstats.norm(0, 1).rvs((100, 12)),
# y=spstats.bernoulli(.5).rvs(100),
# runs=np.repeat([0, 1], 50))
# model = GaussianNB()
# p_vals, perm_vals = stat.randomize_classifier(data, model,
# return_dist=True)
# p_min, p_max = p_vals.min(), p_vals.max()
# perm_mean = perm_vals.mean()
#
# # Test that the p value are well behaved
# nose.tools.assert_greater_equal(1, p_max)
# nose.tools.assert_greater_equal(p_min, 0)
#
# # Test that the mean is close to chance (this is probabilistic)
# nose.tools.assert_greater(.1, np.abs(perm_mean - 0.5))
#
# # Test that the distribution looks normal (this is probabilistic)
# val, p = spstats.normaltest(perm_vals)
# nose.tools.assert_greater(p, 0.001)
#
#
# def test_randomize_classifier_dimension():
# """Test that we can have a time dimension and it's where we expect."""
# data = datasets_3d[0]
# n_perm = 30
# model = GaussianNB()
# p_vals, perm_vals = stat.randomize_classifier(data, model, n_perm,
# return_dist=True)
# nose.tools.assert_equal(len(p_vals), len(data["X"]))
# nose.tools.assert_equal(perm_vals.shape, (n_perm, len(data["X"])))
#
#
# def test_randomize_classifier_seed():
# """Test that we can give a particular random seed to the permuter."""
# data = datasets[0]
# model = GaussianNB()
# seed = 1
# out_a = stat.randomize_classifier(data, model, random_seed=seed)
# out_b = stat.randomize_classifier(data, model, random_seed=seed)
# assert_array_equal(out_a, out_b)
#
#
# def test_randomize_classifier_number():
# """Test size of randomize_classifier vectors."""
# data = datasets[0]
# model = GaussianNB()
# for n_iter in rs.randint(10, 250, 5):
# p_vals, perm_dist = stat.randomize_classifier(data, model, n_iter,
# return_dist=True)
# nose.tools.assert_equal(len(perm_dist), n_iter)
def test_transition_probabilities():
# Test basic
sched = [0, 1, 0, 1]
expected = pd.DataFrame([[0, 1], [1, 0]])
actual = stat.transition_probabilities(sched)
npt.assert_array_equal(expected, actual)
sched = [0, 0, 1, 1]
expected = pd.DataFrame([[.5, .5], [0, 1]])
actual = stat.transition_probabilities(sched)
npt.assert_array_equal(expected, actual)
a = rs.rand(100) < .5
a = np.where(a, "foo", "bar")
out = stat.transition_probabilities(a)
npt.assert_array_equal(out.columns.tolist(), ["bar", "foo"])
npt.assert_array_equal(out.columns, out.index)
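# Worked illustration of the expected matrices above: for sched = [0, 1, 0, 1]
# the observed transitions are 0->1, 1->0, 0->1, so state 0 always moves to 1
# and state 1 always moves to 0, giving [[0, 1], [1, 0]]. For [0, 0, 1, 1] the
# transitions are 0->0, 0->1, 1->1, so row 0 splits evenly as [.5, .5] and row
# 1, which never leaves its state, is [0, 1].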
def test_upsample():
y = np.cumsum(rs.randn(100))
yy1 = stat.upsample(y, 1)
assert_equal(len(yy1), 100)
npt.assert_array_almost_equal(y, yy1)
yy2 = stat.upsample(y, 2)
assert_equal(len(yy2), 199)
npt.assert_array_almost_equal(y, yy2[::2])
class TestRemoveUnitVariance(object):
rs = np.random.RandomState(93)
df = pd.DataFrame(dict(value=rs.rand(8),
group=np.repeat(np.tile(["m", "n"], 2), 2),
cond=np.tile(["x", "y"], 4),
unit=np.repeat(["a", "b"], 4)))
def test_remove_all(self):
df = stat.remove_unit_variance(self.df, "value", "unit")
nt.assert_in("value_within", df)
nt.assert_equal(self.df.value.mean(), self.df.value_within.mean())
nt.assert_equal(self.df.groupby("unit").value_within.mean().var(), 0)
def test_remove_by_group(self):
df = stat.remove_unit_variance(self.df, "value", "unit", "group")
grp = df.groupby("group")
pdt.assert_series_equal(grp.value.mean(), grp.value_within.mean(),
check_names=False)
for _, g in grp:
nt.assert_equal(g.groupby("unit").value_within.mean().var(), 0)
def test_suffix(self):
df = stat.remove_unit_variance(self.df, "value", "unit", suffix="_foo")
nt.assert_in("value_foo", df)
class TestVectorizedCorrelation(object):
rs = np.random.RandomState()
a = rs.randn(50)
b = rs.randn(50)
c = rs.randn(5, 50)
d = rs.randn(5, 50)
def test_vector_to_vector(self):
r_got = stat.vectorized_correlation(self.a, self.b)
r_want, _ = spstats.pearsonr(self.a, self.b)
npt.assert_almost_equal(r_got, r_want)
def test_vector_to_matrix(self):
r_got = stat.vectorized_correlation(self.a, self.c)
nt.assert_equal(r_got.shape, (self.c.shape[0],))
for i, r_got_i in enumerate(r_got):
r_want_i, _ = spstats.pearsonr(self.a, self.c[i])
npt.assert_almost_equal(r_got_i, r_want_i)
def test_matrix_to_matrix(self):
r_got = stat.vectorized_correlation(self.c, self.d)
nt.assert_equal(r_got.shape, (self.c.shape[0],))
for i, r_got_i in enumerate(r_got):
r_want_i, _ = spstats.pearsonr(self.c[i], self.d[i])
npt.assert_almost_equal(r_got_i, r_want_i)
class TestPercentChange(object):
ts_array = np.arange(6).reshape(1, 6)
ts = pd.DataFrame(ts_array)
def test_df(self):
out = stat.percent_change(self.ts)
want = pd.DataFrame([[-100, -60, -20, 20, 60, 100]], dtype=np.float)
pdt.assert_frame_equal(out, want)
def test_df_multirun(self):
out = stat.percent_change(self.ts, 2)
want = pd.DataFrame([[-100, 0, 100, -25, 0, 25]], dtype=np.float)
pdt.assert_frame_equal(out, want)
def test_array(self):
out = stat.percent_change(self.ts_array, 2)
want = np.array([[-100, 0, 100, -25, 0, 25]], np.float)
npt.assert_array_equal(out, want)
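if __name__ == '__main__':
    # Standalone arithmetic sketch (not part of the test suite): the expected
    # frames in TestPercentChange are consistent with expressing each run as
    # (x - run_mean) / run_mean * 100. For a single run over [0..5] the mean
    # is 2.5, which reproduces [-100, -60, -20, 20, 60, 100].
    x = np.arange(6)
    print((x - x.mean()) / x.mean() * 100)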
| bsd-3-clause |
jdweaver/ds_sandbox | homework2/class_7_bonus_homework.py | 2 | 2572 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 18:52:10 2015
@author: joshuaw
"""
'''
OPTIONAL WEB SCRAPING HOMEWORK
First, define a function that accepts an IMDb ID and returns a dictionary of
movie information: title, star_rating, description, content_rating, duration.
The function should gather this information by scraping the IMDb website, not
by calling the OMDb API. (This is really just a wrapper of the web scraping
code we wrote above.)
For example, get_movie_info('tt0111161') should return:
{'content_rating': 'R',
'description': u'Two imprisoned men bond over a number of years...',
'duration': 142,
'star_rating': 9.3,
'title': u'The Shawshank Redemption'}
Then, open the file imdb_ids.txt using Python, and write a for loop that builds
a list in which each element is a dictionary of movie information.
use imdb_ids.txt to build a DataFrame that contains the information defined by the function above
Finally, convert that list into a DataFrame.
'''
# define a function that accepts an IMDb ID and returns a dictionary of movie information
import requests
from bs4 import BeautifulSoup # convert HTML into a structured Soup object
def movie_info(movie_id):
r = requests.get("http://www.imdb.com/title/" + movie_id)
    html = BeautifulSoup(r.text, "html.parser")
info = {}
info["title"] = html.find(name='span', attrs={'class':'itemprop', 'itemprop':'name'}).text #title
info["rating"] = html.find_all(name='span', attrs={'itemprop':'contentRating'})[0].text.strip() #content rating
info["duration"] = int(html.find_all(name='time', attrs={'itemprop':'duration'})[0].text.strip().split()[0]) #duration
info["description"] = html.find(attrs={'itemprop':'description'}).text.strip() #description
info["star_rating"] = float(html.find(name='span', attrs={'itemprop':'ratingValue'}).text) #star rating
return info
# test the function
movie_info("tt0111161")
# open the file of IDs (one ID per row), and store the IDs in a list
with open('imdb_ids.txt', 'rU') as f:
f = f.read().strip()
movie_id_list = f.split("\n")
# get the information for each movie, and store the results in a list
from time import sleep
movie_list = []
for movie in movie_id_list:
movie_list.append(movie_info(movie))
sleep(0.5)
# check that the list of IDs and list of movies are the same length
#blank response means that they are the same length
assert(len(movie_id_list) == len(movie_list))
# convert the list of movies into a DataFrame
import pandas as pd
movie_df = pd.DataFrame(movie_list, index=movie_id_list) | apache-2.0 |
lcharleux/spym | doc/conf.py | 1 | 7505 | # -*- coding: utf-8 -*-
#
# abapy documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 31 15:44:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
#'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.doctest',
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'ipython_directive',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'ipython_console_highlighting',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'spym'
copyright = u'2012, Ludovic Charleux'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar': True, 'sidebarwidth': 250}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'abapydoc'
html_copy_source = True
html_show_sourcelink = True
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'spym.tex', u'SPyM Documentation',
u'Ludovic Charleux', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'spym', u'spym Documentation',
[u'Ludovic Charleux'], 1)
]
| gpl-2.0 |
manashmndl/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 286 | 4353 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When fixed_n_classes is not None, the first labeling is considered a ground
    truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
flennerhag/mlens | mlens/parallel/backend.py | 1 | 29472 | """ML-Ensemble
:author: Sebastian Flennerhag
:copyright: 2017-2018
:license: MIT
Parallel processing backend classes. Manages memory-mapping of data, estimation
caching and job scheduling.
"""
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
# pylint: disable=useless-super-delegation
from __future__ import with_statement, division
import gc
import os
import shutil
import subprocess
import tempfile
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse, hstack
from .. import config
from ..externals.joblib import Parallel, dump, load
from ..utils import check_initialized
from ..utils.exceptions import (ParallelProcessingError,
ParallelProcessingWarning)
from ..externals.sklearn.validation import check_random_state
###############################################################################
def _dtype(a, b=None):
"""Utility for getting a dtype"""
return getattr(a, 'dtype', getattr(b, 'dtype', None))
def dump_array(array, name, path):
"""Dump array for memmapping.
Parameters
----------
array : array-like
Array to be persisted
name : str
Name of file
path : str
Path to cache.
Returns
-------
f: array-like
memory-mapped array.
"""
# First check if the array is on file
if isinstance(array, str):
# Load file from disk. Need to dump if not memmaped already
if not array.split('.')[-1] in ['mmap', 'npy', 'npz']:
# Try loading the file assuming a csv-like format
array = _load(array)
if isinstance(array, str):
# If arr remains a string, it's pointing to an mmap file
f = array
else:
# Dump ndarray on disk
f = os.path.join(path, '%s.mmap' % name)
if os.path.exists(f):
os.unlink(f)
dump(array, f)
return f
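# Note on the branches above (descriptive only, no behaviour change): an
# ndarray is pickled to '<path>/<name>.mmap' with joblib's dump and the file
# path is returned, ready for _load_mmap below to reopen with mmap_mode='r'.
# A string already ending in .mmap/.npy/.npz is returned untouched, while any
# other string is first loaded via _load and then dumped the same way.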
def _load(arr):
"""Load array from file using default settings."""
if arr.split('.')[-1] in ['npy', 'npz']:
return np.load(arr)
else:
try:
return np.genfromtxt(arr)
except Exception as e:
raise IOError("Could not load X from %s, does not "
"appear to be a valid ndarray. "
"Details:\n%r" % (arr, e))
def _load_mmap(f):
"""Load a mmap presumably dumped by joblib, otherwise try numpy."""
try:
return load(f, mmap_mode='r')
except (IndexError, KeyError):
# Joblib's 'load' func fails on npy and npz: use numpy.load
return np.load(f, mmap_mode='r')
def _set_path(job, path, threading):
"""Build path as a cache or list depending on whether using threading"""
if path:
if not isinstance(path, str) and not threading:
raise ValueError("Path must be a str with backend=multiprocessing."
" Got %r" % path.__class__)
elif not isinstance(path, (str, dict)):
raise ValueError("Invalid path format. Should be one of "
"str, dict. Got %r" % path.__class__)
job.dir = path
return job
if threading:
# No need to pickle
job.dir = dict()
return job
# Else, need a directory
path = config.get_tmpdir()
try:
job.tmp = tempfile.TemporaryDirectory(
prefix=config.get_prefix(), dir=path)
job.dir = job.tmp.name
except AttributeError:
# Fails on python 2
job.dir = tempfile.mkdtemp(prefix=config.get_prefix(), dir=path)
return job
###############################################################################
class Job(object):
"""Container class for holding and managing job data.
:class:`Job` is intended as a on-the-fly job handler that keeps track
of input data, predictions, and manages estimation caches.
.. versionchanged:: 0.2.0
See Also
--------
:class:`ParallelProcessing`, :class:`ParallelEvaluation`
Parameters
----------
job : str
Type of job to run. One of ``'fit'``, ``'transform'``, ``'predict'``.
stack : bool
Whether to stack outputs when calls to
:func:`~mlens.parallel.backend.Job.update` are made. This will make
the ``predict_out`` array become ``predict_in``.
split : bool
Whether to create a new sub-cache when the
:attr:`~mlens.parallel.backend.Job.args` property is called.
dir : str, dict, optional
        estimation cache. Pass a dictionary for an in-memory cache (used with
        the threading backend) or a string pointing to the disk directory to
        create the cache in (required with the multiprocessing backend)
tmp : obj, optional
a Tempfile object for temporary directories
targets : array-like of shape [n_in_samples,], optional
input targets
predict_in : array-like of shape [n_in_samples, n_in_features], optional
input data
predict_out : array_like of shape [n_out_samples, n_out_features], optional
prediction output array
"""
__slots__ = ['targets', 'predict_in', 'predict_out', 'dir', 'job', 'tmp',
'_n_dir', 'kwargs', 'stack', 'split']
def __init__(self, job, stack, split, dir=None, tmp=None, predict_in=None,
targets=None, predict_out=None):
self.job = job
self.stack = stack
self.split = split
self.targets = targets
self.predict_in = predict_in
self.predict_out = predict_out
self.tmp = tmp
self.dir = dir
self._n_dir = 0
def clear(self):
"""Clear output data for new task"""
self.predict_out = None
def update(self):
"""Updated output array and shift to input if stacked.
If stacking is en force, the output array will replace the input
array, and used as input for subsequent jobs. Sparse matrices are
force-converted to ``csr`` format.
"""
if self.predict_out is None:
return
if (issparse(self.predict_out) and not
self.predict_out.__class__.__name__.startswith('csr')):
# Enforce csr on spare matrices
self.predict_out = self.predict_out.tocsr()
if self.stack:
self.predict_in = self.predict_out
self.rebase()
def rebase(self):
"""Rebase output labels to input indexing.
Some indexers that only generate predictions for subsets of the
training data require the targets to be rebased. Since indexers
        operate in a strictly sequential manner, rebase simply drops the first
        ``n`` observations in the target vector until the number of remaining
        observations matches the prediction input.
.. seealso::
:class:`~mlens.index.blend.BlendIndex`
"""
if self.targets is not None and (
self.targets.shape[0] > self.predict_in.shape[0]):
# This is legal if X is a prediction matrix generated by predicting
# only a subset of the original training set.
# Since indexing is strictly monotonic, we can simply discard
# the first observations in y to get the corresponding labels.
rebase = self.targets.shape[0] - self.predict_in.shape[0]
self.targets = self.targets[rebase:]
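    # Illustration of the rebase above (numbers are made up): with a
    # blend-style indexer that only predicts the last 70 of 100 training
    # rows, predict_in has 70 rows while targets has 100, so the first 30
    # target values are dropped to keep the two aligned.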
def shuffle(self, random_state):
"""Shuffle inputs.
Permutes the indexing of ``predict_in`` and ``y`` arrays.
Parameters
----------
random_state : int, obj
Random seed number or generator to use.
"""
r = check_random_state(random_state)
idx = r.permutation(self.targets.shape[0])
self.predict_in = self.predict_in[idx]
self.targets = self.targets[idx]
def subdir(self):
"""Return a cache subdirectory
        If ``split`` is in force, a new sub-cache will be created in the
main cache. Otherwise the same sub-cache as used in previous call
will be returned.
.. versionadded:: 0.2.0
Returns
-------
cache : str, list
Either a string pointing to a cache persisted to disk, or an
in-memory cache in the form of a list.
"""
path_name = "task_%s" % str(self._n_dir)
if self.split:
# Increment sub-cache counter
self._n_dir += 1
if isinstance(self.dir, str):
path = os.path.join(self.dir, path_name)
cache_exists = os.path.exists(path)
# Persist cache to disk
if cache_exists and self.split:
raise ParallelProcessingError(
"Subdirectory %s exist. Clear cache." % path_name)
elif not cache_exists:
os.mkdir(path)
return path
# Keep in memory
if path_name in self.dir and self.split:
raise ParallelProcessingError(
"Subdirectory %s exist. Clear cache." % path_name)
elif path_name not in self.dir:
self.dir[path_name] = list()
return self.dir[path_name]
def args(self, **kwargs):
"""Produce args dict
.. versionadded:: 0.2.0
Returns the arguments dictionary passed to a task of a parallel
processing manager. Output dictionary has the following form::
out = {'auxiliary':
{'X': self.predict_in, 'P': self.predict_out},
'main':
{'X': self.predict_in, 'P': self.predict_out},
'dir':
self.subdir(),
'job':
self.job
}
Parameters
----------
**kwargs : optional
Optional keyword arguments to pass to the task.
Returns
-------
args : dict
Arguments dictionary
"""
aux_feed = {'X': self.predict_in, 'P': None}
main_feed = {'X': self.predict_in, 'P': self.predict_out}
if self.job in ['fit', 'evaluate']:
main_feed['y'] = self.targets
aux_feed['y'] = self.targets
out = dict()
if kwargs:
out.update(kwargs)
        out.update({'auxiliary': aux_feed,
                    'main': main_feed,
                    'dir': self.subdir(),
                    'job': self.job})
return out
###############################################################################
class BaseProcessor(object):
"""Parallel processing base class.
Base class for parallel processing engines.
Parameters
----------
backend: str, optional
Type of backend. One of ``'threading'``, ``'multiprocessing'``,
``'sequential'``.
n_jobs : int, optional
Degree of concurrency.
verbose: bool, int, optional
Level of verbosity of the
:class:`~mlens.externals.joblib.parallel.Parallel` instance.
"""
__meta_class__ = ABCMeta
__slots__ = ['caller', '__initialized__', '__threading__', 'job',
'n_jobs', 'backend', 'verbose']
@abstractmethod
def __init__(self, backend=None, n_jobs=None, verbose=None):
self.job = None
self.__initialized__ = 0
self.backend = config.get_backend() if not backend else backend
self.n_jobs = -1 if not n_jobs else n_jobs
self.verbose = False if not verbose else verbose
self.__threading__ = self.backend == 'threading'
def __enter__(self):
return self
def initialize(self, job, X, y, path,
warm_start=False, return_preds=False, **kwargs):
"""Initialize processing engine.
Set up the job parameters before an estimation call. Calling
:func:`~mlens.parallel.backend.BaseProcessor.clear`
undoes initialization.
Parameters
----------
job : str
type of job to complete with each task. One of ``'fit'``,
``'predict'`` and ``'transform'``.
X : array-like of shape [n_samples, n_features]
Input data
y : array-like of shape [n_samples,], optional.
targets. Required for fit, should not be passed to predict or
transform jobs.
path : str or dict, optional
Custom estimation cache. Pass a string to force use of persistent
cache on disk. Pass a ``dict`` for in-memory cache (requires
``backend != 'multiprocessing'``.
return_preds : bool or list, optional
            whether to return prediction output. If ``True``, final prediction
is returned. Alternatively, pass a list of task names for which
output should be returned.
warm_start : bool, optional
whether to re-use previous input data initialization. Useful if
repeated jobs are made on the same input arrays.
**kwargs : optional
optional keyword arguments to pass onto the task's call method.
Returns
-------
out : dict
            An output parameter dictionary to pass to an estimation
            method. Either an empty dict (no output), or
            ``{'return_final': True}`` for only the final prediction, or
            ``{'return_final': False, 'return_names': return_preds}`` if a list
            of task-specific output names was passed.
"""
if not warm_start:
self._initialize(job=job, X=X, y=y, path=path, **kwargs)
if return_preds is True:
return {'return_final': True}
if return_preds is False:
return {}
return {'return_final': False, 'return_names': return_preds}
def _initialize(self, job, X, y=None, path=None, **kwargs):
"""Create a job instance for estimation.
        See :func:`~mlens.parallel.backend.BaseProcessor.initialize` for
further details.
"""
job = Job(job, **kwargs)
job = _set_path(job, path, self.__threading__)
# --- Prepare inputs
for name, arr in zip(('X', 'y'), (X, y)):
if arr is None:
continue
# Dump data in cache
if self.__threading__:
# No need to memmap
f = None
if isinstance(arr, str):
arr = _load(arr)
else:
f = dump_array(arr, name, job.dir)
# Store data for processing
if name == 'y' and arr is not None:
job.targets = arr if self.__threading__ else _load_mmap(f)
elif name == 'X':
job.predict_in = arr \
if self.__threading__ else _load_mmap(f)
self.job = job
self.__initialized__ = 1
gc.collect()
return self
def __exit__(self, *args):
self.clear()
def clear(self):
"""Destroy cache and reset instance job parameters."""
# Detach Job instance
job = self.job
self.job = None
self.__initialized__ = 0
if job:
path = job.dir
path_handle = job.tmp
# Release shared memory references
del job
gc.collect()
# Destroy cache
try:
# If the cache has been persisted to disk, remove it
if isinstance(path, str):
path_handle.cleanup()
except (AttributeError, OSError):
# Python 2 has no handler, can also fail on windows
# Use explicit shutil process, or fall back on subprocess
try:
shutil.rmtree(path)
except OSError:
# Can fail on windows, need to use the shell
try:
subprocess.Popen(
'rmdir /S /Q %s' % path, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).kill()
except OSError:
warnings.warn(
"Failed to delete cache at %s."
"If created with default settings, will be "
"removed on reboot. For immediate "
"removal, manual removal is required." %
path, ParallelProcessingWarning)
finally:
del path, path_handle
gc.collect()
if gc.garbage:
warnings.warn(
"Clearing cache failed, uncollected:\n%r" %
gc.garbage, ParallelProcessingWarning)
class ParallelProcessing(BaseProcessor):
"""Parallel processing engine.
    Engine for running a computational graph.
:class:`ParallelProcessing` is a manager for executing a sequence of tasks
in a given caller, where each task is run sequentially, but assumed to be
parallelized internally. The main responsibility of
:class:`ParallelProcessing` is to handle memory-mapping, estimation
cache updates, input and output array updates and output collection.
Parameters
----------
caller : obj
the caller of the job. Either a Layer or a meta layer class
such as Sequential.
*args : optional
Optional arguments to :class:`~mlens.parallel.backend.BaseProcessor`
**kwargs : optional
Optional keyword arguments to
:class:`~mlens.parallel.backend.BaseProcessor`.
"""
def __init__(self, *args, **kwargs):
super(ParallelProcessing, self).__init__(*args, **kwargs)
def map(self, caller, job, X, y=None, path=None,
            return_preds=False, warm_start=False, split=False, **kwargs):
"""Parallel task mapping.
Run independent tasks in caller in parallel.
Warning
-------
        By default, :func:`~mlens.parallel.backend.ParallelProcessing.map` runs
        on a shallow cache, where all tasks share the same cache. As such, the
        user must ensure that each task has a unique name, or cache retrieval
        will be corrupted. To commit a separate sub-cache to each task, set
``split=True``.
Parameters
----------
caller : iterable
Iterable that generates accepted task instances. Caller should be
a child of the :class:`~mlens.parallel.base.BaseBackend` class,
and tasks need to implement an appropriate call method.
job : str
type of job to complete with each task. One of ``'fit'``,
``'predict'`` and ``'transform'``.
X : array-like of shape [n_samples, n_features]
Input data
y : array-like of shape [n_samples,], optional.
targets. Required for fit, should not be passed to predict or
transform jobs.
path : str or dict, optional
Custom estimation cache. Pass a string to force use of persistent
cache on disk. Pass a ``dict`` for in-memory cache (requires
``backend != 'multiprocessing'``.
return_preds : bool or list, optional
            whether to return prediction output. If ``True``, final prediction
is returned. Alternatively, pass a list of task names for which
output should be returned.
warm_start : bool, optional
whether to re-use previous input data initialization. Useful if
repeated jobs are made on the same input arrays.
split : bool, default = False
whether to commit a separate sub-cache to each task.
**kwargs : optional
optional keyword arguments to pass onto each task.
Returns
-------
out: array-like, list, optional
Prediction array(s).
"""
out = self.initialize(
            job=job, X=X, y=y, path=path, warm_start=warm_start,
return_preds=return_preds, split=split, stack=False)
return self.process(caller=caller, out=out, **kwargs)
def stack(self, caller, job, X, y=None, path=None, return_preds=False,
warm_start=False, split=True, **kwargs):
"""Stacked parallel task mapping.
Run stacked tasks in caller in parallel.
        This method runs the caller's tasks as a stack, where the output of
each task is the input to the next.
Warning
-------
By default, the :func:`~mlens.parallel.backend.ParallelProcessing.stack`
method runs on a deep cache, where each tasks has a separate cache.
As such, the user must ensure that tasks don't depend on data cached
by previous tasks. To run all tasks in a single sub-cache, set
``split=False``.
Parameters
----------
caller : iterable
Iterable that generates accepted task instances. Caller should be
a child of the :class:`~mlens.parallel.base.BaseBackend` class,
and tasks need to implement an appropriate call method.
job : str
type of job to complete with each task. One of ``'fit'``,
``'predict'`` and ``'transform'``.
X : array-like of shape [n_samples, n_features]
Input data
y : array-like of shape [n_samples,], optional.
targets. Required for fit, should not be passed to predict or
transform jobs.
path : str or dict, optional
Custom estimation cache. Pass a string to force use of persistent
cache on disk. Pass a ``dict`` for in-memory cache (requires
``backend != 'multiprocessing'``.
return_preds : bool or list, optional
whether to return prediction output. If ``True``, final prediction
is returned. Alternatively, pass a list of task names for which
output should be returned.
warm_start : bool, optional
whether to re-use previous input data initialization. Useful if
repeated jobs are made on the same input arrays.
split : bool, default = True
whether to commit a separate sub-cache to each task.
**kwargs : optional
optional keyword arguments to pass onto each task.
Returns
-------
out: array-like, list, optional
Prediction array(s).
"""
out = self.initialize(
job=job, X=X, y=y, path=path, warm_start=warm_start,
return_preds=return_preds, split=split, stack=True)
return self.process(caller=caller, out=out, **kwargs)
def process(self, caller, out, **kwargs):
"""Process job.
Main method for processing a caller. Requires the instance to be
        set up by a prior call to
:func:`~mlens.parallel.backend.BaseProcessor.initialize`.
.. seealso::
:func:`~mlens.parallel.backend.ParallelProcessing.map`,
:func:`~mlens.parallel.backend.ParallelProcessing.stack`
Parameters
----------
caller : iterable
Iterable that generates accepted task instances. Caller should be
a child of the :class:`~mlens.parallel.base.BaseBackend` class,
and tasks need to implement an appropriate call method.
out : dict
A dictionary with output parameters. Pass an empty dict for no
output. See
:func:`~mlens.parallel.backend.BaseProcessor.initialize` for more
details.
Returns
-------
out: array-like, list, optional
Prediction array(s).
"""
check_initialized(self)
return_names = out.pop('return_names', [])
return_final = out.pop('return_final', False)
out = list() if return_names else None
tf = self.job.dir if not isinstance(self.job.dir, list) else None
with Parallel(n_jobs=self.n_jobs, temp_folder=tf, max_nbytes=None,
mmap_mode='w+', verbose=self.verbose,
backend=self.backend) as parallel:
for task in caller:
self.job.clear()
self._partial_process(task, parallel, **kwargs)
if task.name in return_names:
out.append(self.get_preds(dtype=_dtype(task)))
self.job.update()
if return_final:
out = self.get_preds(dtype=_dtype(task))
return out
def _partial_process(self, task, parallel, **kwargs):
"""Process given task"""
if self.job.job == 'fit' and getattr(task, 'shuffle', False):
self.job.shuffle(getattr(task, 'random_state', None))
task.setup(self.job.predict_in, self.job.targets, self.job.job)
if not task.__no_output__:
self._gen_prediction_array(task, self.job.job, self.__threading__)
task(self.job.args(**kwargs), parallel=parallel)
if not task.__no_output__ and getattr(task, 'n_feature_prop', 0):
self._propagate_features(task)
def _propagate_features(self, task):
"""Propagate features from input array to output array."""
p_out, p_in = self.job.predict_out, self.job.predict_in
# Check for loss of obs between layers (i.e. with blendindex)
n_in, n_out = p_in.shape[0], p_out.shape[0]
r = int(n_in - n_out)
if not issparse(p_in):
# Simple item setting
p_out[:, :task.n_feature_prop] = p_in[r:, task.propagate_features]
else:
# Need to populate propagated features using scipy sparse hstack
self.job.predict_out = hstack(
[p_in[r:, task.propagate_features],
p_out[:, task.n_feature_prop:]]
).tolil()
def _gen_prediction_array(self, task, job, threading):
"""Generate prediction array either in-memory or persist to disk."""
shape = task.shape(job)
if threading:
self.job.predict_out = np.zeros(shape, dtype=_dtype(task))
else:
f = os.path.join(self.job.dir, '%s_out_array.mmap' % task.name)
try:
self.job.predict_out = np.memmap(
filename=f, dtype=_dtype(task), mode='w+', shape=shape)
except Exception as exc:
raise OSError(
"Cannot create prediction matrix of shape ("
"%i, %i), size %i MBs, for %s.\n Details:\n%r" %
(shape[0], shape[1], 8 * shape[0] * shape[1] / (1024 ** 2),
task.name, exc))
def get_preds(self, dtype=None, order='C'):
"""Return prediction matrix.
Parameters
----------
dtype : numpy dtype object, optional
data type to return
order : str (default = 'C')
data order. See :class:`numpy.asarray` for details.
Returns
-------
P: array-like
Prediction array
"""
if not hasattr(self, 'job'):
raise ParallelProcessingError(
"Processor has been terminated:\ncannot retrieve final "
"prediction array from cache.")
if dtype is None:
dtype = config.get_dtype()
if issparse(self.job.predict_out):
return self.job.predict_out
return np.asarray(self.job.predict_out, dtype=dtype, order=order)
###############################################################################
class ParallelEvaluation(BaseProcessor):
"""Parallel cross-validation engine.
Minimal parallel processing engine. Similar to :class:`ParallelProcessing`,
but offers less features, only fits the *callers* indexer, and excepts
no task output.
"""
def __init__(self, *args, **kwargs):
super(ParallelEvaluation, self).__init__(*args, **kwargs)
def process(self, caller, case, X, y, path=None, **kwargs):
"""Process caller.
Parameters
----------
caller: iterable
            Iterable for the evaluation job. The expected caller is an
:class:`Evaluator` instance.
case: str
evaluation case to run on the evaluator. One of
``'preprocess'`` and ``'evaluate'``.
X: array-like of shape [n_samples, n_features]
Input data
y: array-like of shape [n_samples,], optional.
targets. Required for fit, should not be passed to predict or
transform jobs.
path: str or dict, optional
Custom estimation cache. Pass a string to force use of persistent
cache on disk. Pass a ``dict`` for in-memory cache (requires
``backend != 'multiprocessing'``.
"""
self._initialize(
job='fit', X=X, y=y, path=path, split=False, stack=False)
check_initialized(self)
# Use context manager to ensure same parallel job during entire process
tf = self.job.dir if not isinstance(self.job.dir, list) else None
with Parallel(n_jobs=self.n_jobs, temp_folder=tf, max_nbytes=None,
mmap_mode='w+', verbose=self.verbose,
backend=self.backend) as parallel:
caller.indexer.fit(self.job.predict_in, self.job.targets, self.job.job)
caller(parallel, self.job.args(**kwargs), case)
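if __name__ == '__main__':
    # Minimal sketch (not part of the library): drive the Job container
    # directly with an in-memory dict cache, as used with the threading
    # backend. The array shapes and the 'fit' job name are arbitrary.
    demo_job = Job('fit', stack=False, split=False, dir=dict())
    demo_job.predict_in = np.random.rand(10, 3)   # stand-in input data
    demo_job.targets = np.random.rand(10)         # stand-in targets
    demo_args = demo_job.args()
    # args() bundles the inputs, the sub-cache and the job name for a task
    print(sorted(demo_args))   # ['auxiliary', 'dir', 'job', 'main']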
| mit |
zettsu-t/cPlusPlusFriend | scripts/stock_price/yokohama_population.py | 1 | 3391 | #!/usr/bin/python3
# coding: utf-8
'''
Yokohama population statistics
Data source
http://www.city.yokohama.lg.jp/ex/stat/opendata/suikei01.html
'''
import glob
import os
import re
import numpy as np
import pandas
from collections import OrderedDict
COLUMN_NAME_FROM_TO = OrderedDict({'年月日': 'date',
'全国地方公共団体コード': 'code',
'市区名': 'ward',
'面積[平方キロメートル]': 'area',
'世帯数[世帯]': 'household',
'人口総数[人]': 'population',
'男[人]': 'male',
'女[人]': 'female',
'1世帯当たり人員[人]': 'per_household',
'人口密度[人/平方キロメートル]': 'density',
'届出による前月比増減の世帯数[世帯]': 'diff_household',
'届出による前月比増減の人口[人]': 'diff_population'})
COLUMN_NAME_CHARACTERS =[['[','['], [']',']'], ['/','/']]
WARDS = OrderedDict({'横浜市': ['All', 'A'],
'鶴見区': ['Tsurumi', 'N'],
'神奈川区': ['Kanagawa', 'N'],
'西区': ['Nishi', 'N'],
'中区': ['Naka', 'N'],
'南区': ['Minami', 'S'],
'港南区': ['Konan', 'S'],
'保土ケ谷区': ['Hodogaya', 'S'],
'旭区': ['Asahi', 'S'],
'磯子区': ['Isogo', 'S'],
'金沢区': ['Kanazawa', 'S'],
'港北区': ['Kohoku', 'N'],
'緑区': ['Midori', 'N'],
'青葉区': ['Aoba', 'N'],
'都筑区': ['Tsuduki', 'N'],
'戸塚区': ['Totsuka', 'S'],
'栄区': ['Sakae', 'S'],
'泉区': ['Izumi', 'S'],
'瀬谷区': ['Seya', 'S']})
column_names = list(COLUMN_NAME_FROM_TO.values())
out_table = None
def format_date(date_str):
m = re.match(r'(\d+)\D+(\d+)\D(\d+)', date_str.strip())
year = int(m.group(1))
month = int(m.group(2))
day = int(m.group(3))
return '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day)
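# Example (illustrative input only; the regex above accepts any non-digit
# separators): format_date('2018年1月1日') -> '2018-01-01'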
for filename in sorted(glob.glob('yokohama/*.csv')):
df = pandas.read_csv(filename)
for (from_char, to_char) in COLUMN_NAME_CHARACTERS:
df.columns = df.columns.str.replace(from_char, to_char)
df = df.rename(columns=COLUMN_NAME_FROM_TO)
df['ward_jp'] = df['ward']
df['date'] = df['date'].apply(format_date)
df['region'] = df['ward'].apply(lambda x: WARDS[x][1])
df['ward'] = df['ward'].apply(lambda x: WARDS[x][0])
if out_table is None:
out_table = df
base_population = df.loc[:, 'population']
df.loc[:,'pop_ratio'] = np.full(df.shape[0], 1.0, dtype=float)
else:
df.loc[:,'pop_ratio'] = df.loc[:, 'population'] / base_population
out_table = pandas.concat([out_table, df], ignore_index=True)
out_table.to_csv('incoming/yokohama.csv', index=None)
| mit |
annahs/atmos_research | WHI_long_term_coating_MMD_timeseries.py | 1 | 9148 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from matplotlib import dates
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
from datetime import timedelta
import math
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
timezone = -8
######get spike times
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/')
file = open('WHI_rBC_record_2009to2013-spike_times.rbcpckl', 'r')
spike_times_full = pickle.load(file)
file.close()
spike_times = []
for spike in spike_times_full:
if spike.year >= 2010:
if spike < datetime(2012,06,01):
spike_times.append(spike)
#fire times
fire_time1 = [datetime.strptime('2009/07/27 00:00', '%Y/%m/%d %H:%M'), datetime.strptime('2009/08/08 00:00', '%Y/%m/%d %H:%M')] #row_datetimes following Takahama et al (2011) doi:10.5194/acp-11-6367-2011 #PST in LT
fire_time2 = [datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M'), datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')] #jason's BC clear report #PST in LT
#open cluslist and read into a python list
cluslist = []
CLUSLIST_file = 'C:/hysplit4/working/WHI/CLUSLIST_10'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
cluster_no = int(newline[0])
traj_time = datetime(int(newline[2])+2000,int(newline[3]),int(newline[4]),int(newline[5]))
if traj_time.year >=2010:
cluslist.append([traj_time,cluster_no])
#sort cluslist by row_datetime in place
cluslist.sort(key=lambda clus_info: clus_info[0])
#create list of size bins for size distrs
bins = []
start_size = 70 #VED in nm
end_size = 220 #VED in nm
interval_length = 5 #in nm
while start_size < end_size:
bins.append(start_size)
start_size += interval_length
#create dictionary with size bins as keys
binned_data = {}
for bin in bins:
binned_data[bin] = [0,0]
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
rBC_density = 1.8
LF_max = 45000
min_rBC_mass = 0.25#100-#0.94#1.63-#120 2.6-#140 3.86-#160nm 0.25-#65
max_rBC_mass = 10.05#140 3.86-160 5.5-#180nm 10.05-#220
coat_min = 120
coat_max = 160
particles=0
no_scat=0
fit_failure=0
early_evap=0
flat_fit=0
LF_high=0
overall_data = []
for traj in cluslist:
traj_time_UTC = traj[0]
traj_time_PST = traj_time_UTC+timedelta(hours = timezone)
cluster_no = traj[1]
print traj_time_PST, cluster_no
traj_start = traj_time_UTC-timedelta(hours=3) #use UTC to retrieve data b/c table dates are in UTC
traj_end = traj_time_UTC+timedelta(hours=3)
begin_data = calendar.timegm(traj_start.timetuple())
end_data = calendar.timegm(traj_end.timetuple())
coating_data = []
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])+timedelta(hours = timezone) #db is UTC, convert to LT here
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
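            # Volume-equivalent diameter: VED = (6*V/pi)**(1/3) with V = mass/density,
            # where the 10**15 converts fg to g and the 10**7 converts cm to nm.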
#ignore any spike times (LT)
for spike in spike_times:
spike_start = spike-timedelta(minutes=5)
spike_end = spike+timedelta(minutes=5)
if (spike_start <= event_time < spike_end):
continue
#then get the mass distributions for all particles
for key in binned_data:
key_value = float(key)
interval_end = key_value + interval_length
if rBC_VED >= key_value and rBC_VED < interval_end:
binned_data[key][0] = binned_data[key][0] + rBC_mass
binned_data[key][1] = binned_data[key][1] + 1
#now get the available coating data
#skip if not a good LEO record
if meas_scat_amp < 6 :
no_scat +=1
if meas_scat_amp >= 6 and LEO_amp == 0.0 and LF_baseline_pctdiff == None:
early_evap +=1
continue
if LEO_amp == -2:
early_evap +=1
continue
if LEO_amp == -1:
fit_failure +=1
continue
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
continue
if LEO_amp > LF_max:
LF_high +=1
continue
#if in a BB time,skip
if (fire_time1[0] <= event_time <= fire_time1[1]) or (fire_time2[0] <= event_time <= fire_time2[1]):
continue
#collect good coating data
if rBC_VED >= coat_min and rBC_VED <coat_max:
if meas_scat_amp < 6:
coat_thickness = 0.0
if meas_scat_amp < 6 or LEO_amp > 0:
Dp = rBC_VED + coat_thickness*2.0
Dc = rBC_VED
coating_data.append([Dp,Dc])
#get dp/dc for traj
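    # The bulk ratio below is the cube root of (sum of Dp**3)/(sum of Dc**3),
    # i.e. a volume-weighted coated-to-core diameter ratio for this 6 h window.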
Dp_vals = [row[0] for row in coating_data]
Dc_vals = [row[1] for row in coating_data]
sum_of_Dp_cubes = 0
for Dp in Dp_vals:
sum_of_Dp_cubes = sum_of_Dp_cubes + Dp**3
sum_of_Dc_cubes = 0
for Dc in Dc_vals:
sum_of_Dc_cubes = sum_of_Dc_cubes + Dc**3
try:
DpDc = math.pow((sum_of_Dp_cubes/sum_of_Dc_cubes),(1./3.))
except:
DpDc = np.nan
print 'Dp/Dc', sum_of_Dp_cubes, sum_of_Dc_cubes
#fiddle with mass distrs (sort, etc)
mass_distr_list = []
for bin, value in binned_data.iteritems():
bin_mass = value[0]
bin_numb = value[1]
temp = [bin,bin_mass,bin_numb]
mass_distr_list.append(temp)
mass_distr_list.sort()
for row in mass_distr_list: #normalize
row.append(row[1]) #these 2 lines append teh raw mass and number concs
row.append(row[2])
row[1] = row[1]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[2] = row[2]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[0] = row[0]+interval_length/2 #correction for our binning code recording bin starts as keys instead of midpoints
overall_data.append([traj_time_PST,cluster_no,DpDc,mass_distr_list])
binned_data = {}
for bin in bins:
binned_data[bin] = [0,0]
conn.close()
print '# of particles', particles
print 'no_scat', no_scat
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'flat_fit', flat_fit
print 'LF_high', LF_high
evap_pct = (early_evap)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
print evap_pct, no_scat_pct
#save overall_data
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/')
file = open('MMD_and_coating_data.binpickl', 'w')
pickle.dump(overall_data, file)
file.close()
#####plotting
datetimes = [dates.date2num(row[0]) for row in overall_data]
DpDc = [row[2] for row in overall_data]
fire_span1_10s=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
fire_span1_10f=datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
fire_alpha = 0.25
fire_color = '#990000'
fig = plt.figure(figsize=(11.5,7.5))
hfmt = dates.DateFormatter('%b')
#hfmt = dates.DateFormatter('%m-%d')
display_month_interval = 1
startdate_2010 = '2010/05/31'
enddate_2010 = '2010/08/04'
startdate_2012 = '2012/03/29'
enddate_2012 = '2012/06/05'
ax7 = plt.subplot2grid((4,2), (0,0), colspan=1,rowspan = 2)
ax8 = plt.subplot2grid((4,2), (0,1), colspan=1,rowspan = 2, sharey=ax7)
#ax10 = plt.subplot2grid((4,2), (2,0), colspan=1,rowspan = 2)
#ax11 = plt.subplot2grid((4,2), (2,1), colspan=1,rowspan = 2, sharey=ax10)
ax7.plot(datetimes,DpDc, marker = 'o')
ax7.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax7.xaxis.set_visible(True)
ax7.yaxis.set_visible(True)
ax7.set_ylabel('Dp/Dc')
ax7.set_xlim(dates.date2num(datetime.strptime(startdate_2010, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2010, '%Y/%m/%d')))
ax7.axvspan(dates.date2num(fire_span1_10s),dates.date2num(fire_span1_10f), facecolor=fire_color, alpha=fire_alpha)
ax7.text(0.1, 0.9,'2010', transform=ax7.transAxes)
ax8.plot(datetimes,DpDc, marker = 'o')
ax8.xaxis.set_major_formatter(hfmt)
ax8.xaxis.set_major_locator(dates.MonthLocator(interval = display_month_interval))
ax8.xaxis.set_visible(True)
ax8.yaxis.set_visible(False)
ax8.set_xlabel('month')
ax8.set_xlim(dates.date2num(datetime.strptime(startdate_2012, '%Y/%m/%d')), dates.date2num(datetime.strptime(enddate_2012, '%Y/%m/%d')))
ax8.text(0.1, 0.9,'2012', transform=ax8.transAxes)
#legend = ax8.legend(loc='upper center', bbox_to_anchor=(0.5, 1.275), ncol=3, numpoints=1)
plt.show()
| mit |
0x0all/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
briandrawert/pyurdme | examples/yeast_polarization/polarisome_model.py | 5 | 4149 | #!/usr/bin/env python
""" pyURDME model file for the model found in Lawson et al. PloS Comp Bio (2013). """
import os
import pyurdme
import dolfin
import math
import matplotlib.pyplot as plt
import numpy
class Cdc42(pyurdme.URDMEDataFunction):
def __init__(self, a=-4*numpy.pi, b=4*numpy.pi, N=160):
""" 1D domain from a to b. """
pyurdme.URDMEDataFunction.__init__(self, name="Cdc42")
self.a = a
self.b = b
self.N = N
def map(self, x):
#ligand_c[i] = 100*Gradient_max*exp( (-1*pow((i-floor(N/2))*360.0/N,2))/(2*pow(Gradient_sigma,2)) );
# x[0] == i*l
Gradient_max = 3.0*160/self.N
Gradient_max = Gradient_max*0.7917
Gradient_sigma = 20.3837
return 100*Gradient_max*numpy.exp(
-1*((x[0]*(360)/(self.b - self.a))**2) / (2*Gradient_sigma**2)
)
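        # The value returned above is a Gaussian pheromone gradient centered at x = 0;
        # x[0]*360/(b - a) rescales the 1D domain [a, b] to degrees, matching the
        # commented-out reference expression that used (i - N/2)*360/N.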
class polarisome_1D(pyurdme.URDMEModel):
def __init__(self,model_name="polarisome_1D"):
pyurdme.URDMEModel.__init__(self,model_name)
default_D = 0.0053
fast_D = 1000*default_D
# Species
Bni1c = pyurdme.Species(name="Bni1c", diffusion_constant=fast_D)
Bni1m = pyurdme.Species(name="Bni1m", diffusion_constant=default_D)
Spa2c = pyurdme.Species(name="Spa2c", diffusion_constant=fast_D)
Spa2m = pyurdme.Species(name="Spa2m", diffusion_constant=default_D)
Actinc = pyurdme.Species(name="Actinc", diffusion_constant=fast_D)
Actinm = pyurdme.Species(name="Actinm", diffusion_constant=default_D)
self.add_species([Bni1c, Bni1m, Spa2c, Spa2m, Actinc, Actinm])
NUM_VOXEL = 160
self.mesh = pyurdme.URDMEMesh.generate_interval_mesh(nx=NUM_VOXEL, a=-4*numpy.pi, b=4*numpy.pi, periodic=True)
Bon = pyurdme.Parameter(name="Bon", expression=1.6e-6)
Boff = pyurdme.Parameter(name="Boff", expression=0.25)
Bfb = pyurdme.Parameter(name="Bfb", expression=1.9e-5)
Aon = pyurdme.Parameter(name="Aon", expression=7.7e-5)
Aoff = pyurdme.Parameter(name="Aoff", expression=0.018)
Km = pyurdme.Parameter(name="Km", expression=3500)
Son = pyurdme.Parameter(name="Son", expression=0.16)
Soff = pyurdme.Parameter(name="Soff", expression=0.35)
self.add_parameter([Bon, Boff, Bfb, Aon, Aoff, Km, Son, Soff])
# Add Data Function to model the mating pheromone gradient.
self.add_data_function(Cdc42())
# Reactions
R0 = pyurdme.Reaction(name="R0", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bon*Bni1c*NUM_VOXELS*Cdc42")
R1 = pyurdme.Reaction(name="R1", reactants={Bni1m:1}, products={Bni1c:1}, massaction=True, rate=Boff)
R2 = pyurdme.Reaction(name="R2", reactants={Actinc:1}, products={Actinm:1}, propensity_function="Aon*Bni1m*Actinc*NUM_VOXELS")
R3 = pyurdme.Reaction(name="R3", reactants={Actinm:1}, products={Actinc:1}, propensity_function="Aoff*Km/(Km+Spa2m)*Actinm")
R4 = pyurdme.Reaction(name="R4", reactants={Spa2c:1}, products={Spa2m:1}, propensity_function="Son*Spa2c*NUM_VOXELS*Actinm")
R5 = pyurdme.Reaction(name="R5", reactants={Spa2m:1}, products={Spa2c:1}, massaction=True, rate=Soff)
R6 = pyurdme.Reaction(name="R6", reactants={Bni1c:1}, products={Bni1m:1}, propensity_function="Bfb*Bni1c*NUM_VOXELS*Spa2m")
self.add_reaction([R0,R1,R2,R3,R4,R5,R6])
# Distribute molecules randomly over the mesh according to their initial values
self.set_initial_condition_scatter({Bni1c:1000})
self.set_initial_condition_scatter({Spa2c:5000})
self.set_initial_condition_scatter({Actinc:40})
#self.timespan(range(0,3601,30))
self.timespan(range(0,201,10))
if __name__=="__main__":
""" Dump model to a file. """
model = polarisome_1D()
result = model.run()
x_vals = model.mesh.coordinates()[:, 0]
Bni1 = result.get_species("Bni1m", timepoints=20)
Spa2 = result.get_species("Spa2m", timepoints=20)
plt.plot(x_vals, Spa2)
plt.title('Spa2_m at t={0}'.format(model.tspan[20]))
plt.show()
| gpl-3.0 |
jstoxrocky/statsmodels | statsmodels/stats/tests/test_pairwise.py | 26 | 12256 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 15:34:18 2012
Author: Josef Perktold
"""
import warnings
from statsmodels.compat.python import BytesIO, asbytes, range
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_,
assert_raises, assert_allclose)
from statsmodels.stats.libqsturng import qsturng
ss = '''\
43.9 1 1
39.0 1 2
46.7 1 3
43.8 1 4
44.2 1 5
47.7 1 6
43.6 1 7
38.9 1 8
43.6 1 9
40.0 1 10
89.8 2 1
87.1 2 2
92.7 2 3
90.6 2 4
87.7 2 5
92.4 2 6
86.1 2 7
88.1 2 8
90.8 2 9
89.1 2 10
68.4 3 1
69.3 3 2
68.5 3 3
66.4 3 4
70.0 3 5
68.1 3 6
70.6 3 7
65.2 3 8
63.8 3 9
69.2 3 10
36.2 4 1
45.2 4 2
40.7 4 3
40.5 4 4
39.3 4 5
40.3 4 6
43.2 4 7
38.7 4 8
40.9 4 9
39.7 4 10'''
#idx Treatment StressReduction
ss2 = '''\
1 mental 2
2 mental 2
3 mental 3
4 mental 4
5 mental 4
6 mental 5
7 mental 3
8 mental 4
9 mental 4
10 mental 4
11 physical 4
12 physical 4
13 physical 3
14 physical 5
15 physical 4
16 physical 1
17 physical 1
18 physical 2
19 physical 3
20 physical 3
21 medical 1
22 medical 2
23 medical 2
24 medical 2
25 medical 3
26 medical 2
27 medical 3
28 medical 1
29 medical 3
30 medical 1'''
ss3 = '''\
1 24.5
1 23.5
1 26.4
1 27.1
1 29.9
2 28.4
2 34.2
2 29.5
2 32.2
2 30.1
3 26.1
3 28.3
3 24.3
3 26.2
3 27.8'''
ss5 = '''\
2 - 3\t4.340\t0.691\t7.989\t***
2 - 1\t4.600\t0.951\t8.249\t***
3 - 2\t-4.340\t-7.989\t-0.691\t***
3 - 1\t0.260\t-3.389\t3.909\t-
1 - 2\t-4.600\t-8.249\t-0.951\t***
1 - 3\t-0.260\t-3.909\t3.389\t'''
cylinders = np.array([8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 6, 6, 6, 4, 4,
4, 4, 4, 4, 6, 8, 8, 8, 8, 4, 4, 4, 4, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 6, 6,
6, 6, 4, 4, 4, 4, 4, 8, 4, 6, 6, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4])
cyl_labels = np.array(['USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'France',
'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Japan',
'Germany', 'France', 'Germany', 'Sweden', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'USA', 'USA', 'France', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'Japan', 'USA', 'USA', 'USA', 'USA', 'Germany', 'Japan', 'Japan', 'USA', 'Sweden', 'USA', 'France',
'Japan', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA',
'Germany', 'Japan', 'Japan', 'USA', 'USA', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'USA',
'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Germany', 'USA', 'USA', 'USA'])
#accommodate recfromtxt for python 3.2, requires bytes
ss = asbytes(ss)
ss2 = asbytes(ss2)
ss3 = asbytes(ss3)
ss5 = asbytes(ss5)
dta = np.recfromtxt(BytesIO(ss), names=("Rust","Brand","Replication"))
dta2 = np.recfromtxt(BytesIO(ss2), names = ("idx", "Treatment", "StressReduction"))
dta3 = np.recfromtxt(BytesIO(ss3), names = ("Brand", "Relief"))
dta5 = np.recfromtxt(BytesIO(ss5), names = ('pair', 'mean', 'lower', 'upper', 'sig'), delimiter='\t')
sas_ = dta5[[1,3,2]]
from statsmodels.stats.multicomp import (tukeyhsd, pairwise_tukeyhsd,
MultiComparison)
#import statsmodels.sandbox.stats.multicomp as multi
#print tukeyhsd(dta['Brand'], dta['Rust'])
def get_thsd(mci, alpha=0.05):
var_ = np.var(mci.groupstats.groupdemean(), ddof=len(mci.groupsunique))
means = mci.groupstats.groupmean
nobs = mci.groupstats.groupnobs
resi = tukeyhsd(means, nobs, var_, df=None, alpha=alpha,
q_crit=qsturng(1-alpha, len(means), (nobs-1).sum()))
#print resi[4]
var2 = (mci.groupstats.groupvarwithin() * (nobs - 1.)).sum() \
/ (nobs - 1.).sum()
#print nobs, (nobs - 1).sum()
#print mci.groupstats.groupvarwithin()
assert_almost_equal(var_, var2, decimal=14)
return resi
class CheckTuckeyHSDMixin(object):
@classmethod
def setup_class_(self):
self.mc = MultiComparison(self.endog, self.groups)
self.res = self.mc.tukeyhsd(alpha=self.alpha)
def test_multicomptukey(self):
assert_almost_equal(self.res.meandiffs, self.meandiff2, decimal=14)
assert_almost_equal(self.res.confint, self.confint2, decimal=2)
assert_equal(self.res.reject, self.reject2)
def test_group_tukey(self):
res_t = get_thsd(self.mc, alpha=self.alpha)
assert_almost_equal(res_t[4], self.confint2, decimal=2)
def test_shortcut_function(self):
#check wrapper function
res = pairwise_tukeyhsd(self.endog, self.groups, alpha=self.alpha)
assert_almost_equal(res.confint, self.res.confint, decimal=14)
class TestTuckeyHSD2(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#balanced case
self.endog = dta2['StressReduction']
self.groups = dta2['Treatment']
self.alpha = 0.05
self.setup_class_() #in super
#from R
tukeyhsd2s = np.array([ 1.5,1,-0.5,0.3214915,
-0.1785085,-1.678509,2.678509,2.178509,
0.6785085,0.01056279,0.1079035,0.5513904]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.05
def test_table_names_default_group_order(self):
t = self.res._results_table
# if the group_order parameter is not used, the groups should
# be reported in alphabetical order
expected_order = [(b'medical', b'mental'),
(b'medical', b'physical'),
(b'mental', b'physical')]
for i in range(1, 4):
first_group = t[i][0].data
second_group = t[i][1].data
assert_((first_group, second_group) == expected_order[i - 1])
def test_table_names_custom_group_order(self):
# if the group_order parameter is used, the groups should
# be reported in the specified order
mc = MultiComparison(self.endog, self.groups,
group_order=[b'physical', b'medical', b'mental'])
res = mc.tukeyhsd(alpha=self.alpha)
#print(res)
t = res._results_table
expected_order = [(b'physical',b'medical'),
(b'physical',b'mental'),
(b'medical', b'mental')]
for i in range(1, 4):
first_group = t[i][0].data
second_group = t[i][1].data
assert_((first_group, second_group) == expected_order[i - 1])
class TestTuckeyHSD2Pandas(TestTuckeyHSD2):
@classmethod
def setup_class(self):
super(TestTuckeyHSD2Pandas, self).setup_class()
import pandas
self.endog = pandas.Series(self.endog)
# we are working with bytes on python 3, not with strings in this case
self.groups = pandas.Series(self.groups, dtype=object)
def test_incorrect_output(self):
# too few groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 4)
# too many groups
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1, 2] * 6)
# just one group
assert_raises(ValueError, MultiComparison, np.array([1] * 10), [1] * 10)
# group_order doesn't select all observations, only one group left
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
assert_raises(ValueError, MultiComparison, np.array([1] * 10),
[1, 2] * 5, group_order=[1])
# group_order doesn't select all observations,
# we do tukey_hsd with reduced set of observations
data = np.arange(15)
groups = np.repeat([1, 2, 3], 5)
mod1 = MultiComparison(np.array(data), groups, group_order=[1, 2])
res1 = mod1.tukeyhsd(alpha=0.01)
mod2 = MultiComparison(np.array(data[:10]), groups[:10])
res2 = mod2.tukeyhsd(alpha=0.01)
attributes = ['confint', 'data', 'df_total', 'groups', 'groupsunique',
'meandiffs', 'q_crit', 'reject', 'reject2', 'std_pairs',
'variance']
for att in attributes:
err_msg = att + 'failed'
assert_allclose(getattr(res1, att), getattr(res2, att), rtol=1e-14,
err_msg=err_msg)
attributes = ['data', 'datali', 'groupintlab', 'groups', 'groupsunique',
'ngroups', 'nobs', 'pairindices']
for att in attributes:
err_msg = att + 'failed'
assert_allclose(getattr(mod1, att), getattr(mod2, att), rtol=1e-14,
err_msg=err_msg)
class TestTuckeyHSD2s(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case
self.endog = dta2['StressReduction'][3:29]
self.groups = dta2['Treatment'][3:29]
self.alpha = 0.01
self.setup_class_()
#from R
tukeyhsd2s = np.array(
[1.8888888888888889, 0.888888888888889, -1, 0.2658549,
-0.5908785, -2.587133, 3.511923, 2.368656,
0.5871331, 0.002837638, 0.150456, 0.1266072]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.01
class TestTuckeyHSD3(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#SAS case
self.endog = dta3['Relief']
self.groups = dta3['Brand']
self.alpha = 0.05
self.setup_class_()
#super(self, self).setup_class_()
#CheckTuckeyHSD.setup_class_()
self.meandiff2 = sas_['mean']
self.confint2 = sas_[['lower','upper']].view(float).reshape((3,2))
self.reject2 = sas_['sig'] == asbytes('***')
class TestTuckeyHSD4(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case verified in Matlab
self.endog = cylinders
self.groups = cyl_labels
self.alpha = 0.05
self.setup_class_()
self.res._simultaneous_ci()
#from Matlab
self.halfwidth2 = np.array([1.5228335685980883, 0.9794949704444682, 0.78673802805533644,
2.3321237694566364, 0.57355135882752939])
self.meandiff2 = np.array([0.22222222222222232, 0.13333333333333375, 0.0, 2.2898550724637685,
-0.088888888888888573, -0.22222222222222232, 2.0676328502415462,
-0.13333333333333375, 2.1565217391304348, 2.2898550724637685])
self.confint2 = np.array([-2.32022210717, 2.76466655161, -2.247517583, 2.51418424967,
-3.66405224956, 3.66405224956, 0.113960166573, 4.46574997835,
-1.87278583908, 1.6950080613, -3.529655688, 3.08521124356, 0.568180988881,
3.5670847116, -3.31822643175, 3.05155976508, 0.951206924521, 3.36183655374,
-0.74487911754, 5.32458926247]).reshape(10,2)
self.reject2 = np.array([False, False, False, True, False, False, True, False, True, False])
def test_hochberg_intervals(self):
assert_almost_equal(self.res.halfwidths, self.halfwidth2, 14)
| bsd-3-clause |
flaviovdf/pyksc | src/scripts/tags_io.py | 1 | 2958 | # -*- coding: utf8
'''This module contains the code used for data conversion'''
from __future__ import division, print_function
from collections import defaultdict
from sklearn.base import BaseEstimator
from sklearn.feature_extraction.text import Vectorizer
import nltk
class NoopAnalyzer(BaseEstimator):
'''
Since we use NLTK to preprocess (more control) this
class is used to bypass sklearns preprocessing
'''
def analyze(self, text_document):
'''Does nothing'''
return text_document
def __tokenize_and_stem(fpath):
'''
Tokenizes and stems the file, converting each line to
an array of words.
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
'''
tokenizer = nltk.RegexpTokenizer(r'\w+')
stopwords = set(nltk.corpus.stopwords.words('english'))
stemmer = nltk.stem.PorterStemmer()
docs = []
term_pops = defaultdict(int)
with open(fpath) as tags_file:
for line in tags_file:
as_doc = []
for term in tokenizer.tokenize(line)[1:]:
term = term.lower().strip()
if term not in stopwords and term != '':
stemmed = stemmer.stem(term)
as_doc.append(stemmed)
term_pops[stemmed] += 1
docs.append(as_doc)
return docs, term_pops
def clean_up(fpath, bottom_filter=0.01):
'''
Converts a YouTube tag file to a series of tokens. This code
stems the tags, removes stopwords and filters infrequent
    tags (whose probability is below `bottom_filter`).
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
bottom_filter: float (defaults to 0.01, one percent)
Minimum probability for tags to be considered useful
'''
docs, term_pops = __tokenize_and_stem(fpath)
for doc in docs:
to_yield = []
for term in doc:
prob_term = term_pops[term] / len(term_pops)
if prob_term > bottom_filter:
to_yield.append(term)
yield to_yield
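# A hedged usage sketch (file layout assumed from __tokenize_and_stem: one video
# per line, the first whitespace-separated token an id, the rest are tags):
#
#     for tokens in clean_up('video_tags.txt', bottom_filter=0.01):
#         print(tokens)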
def vectorize_videos(fpath, use_idf=False):
'''
    Converts a YouTube tag file to a sparse term-weight matrix. Weights can
    optionally be rescaled by IDF.
Arguments
---------
fpath: a path to a file
Each line is a song, tags are separated by space
use_idf: bool (optinal, defaults to True)
Indicates whether to use IDF.
bottom_filter: float (defaults to 0.005, half of one percent)
Minimum probability for tags to be considered useful
'''
#Vectorizes to TF-IDF
vectorizer = Vectorizer(analyzer=NoopAnalyzer(), use_idf = use_idf)
sparse_matrix = vectorizer.fit_transform(clean_up(fpath, bottom_filter=0))
vocabulary = vectorizer.vocabulary
    return sparse_matrix, vocabulary
| bsd-3-clause |
srjoglekar246/sympy | sympy/external/importtools.py | 3 | 6629 | """Tools to assist importing optional external modules."""
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
return eval(os.getenv('SYMPY_DEBUG', 'False'))
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 6),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
# TODO: After 2.5 is dropped, use new 'as' keyword
#except catch as e:
except catch, e:
if warn_not_installed:
warnings.warn("%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/nonparametric/kde.py | 7 | 18714 | """
Univariate Kernel Density Estimators
References
----------
Racine, Jeff. (2008) "Nonparametric Econometrics: A Primer," Foundation and
Trends in Econometrics: Vol 3: No 1, pp1-88.
http://dx.doi.org/10.1561/0800000009
http://en.wikipedia.org/wiki/Kernel_%28statistics%29
Silverman, B.W. Density Estimation for Statistics and Data Analysis.
"""
from __future__ import absolute_import, print_function
from statsmodels.compat.python import range
# for 2to3 with extensions
import warnings
import numpy as np
from scipy import integrate, stats
from statsmodels.sandbox.nonparametric import kernels
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
from . import bandwidths
from .kdetools import (forrt, revrt, silverman_transform, counts)
from .linbin import fast_linbin
#### Kernels Switch for estimators ####
kernel_switch = dict(gau=kernels.Gaussian, epa=kernels.Epanechnikov,
uni=kernels.Uniform, tri=kernels.Triangular,
biw=kernels.Biweight, triw=kernels.Triweight,
cos=kernels.Cosine, cos2=kernels.Cosine2)
def _checkisfit(self):
try:
self.density
except:
raise ValueError("Call fit to fit the density first")
#### Kernel Density Estimator Class ###
class KDEUnivariate(object):
"""
Univariate Kernel Density Estimator.
Parameters
----------
endog : array-like
The variable for which the density estimate is desired.
Notes
-----
If cdf, sf, cumhazard, or entropy are computed, they are computed based on
the definition of the kernel rather than the FFT approximation, even if
the density is fit with FFT = True.
`KDEUnivariate` is much faster than `KDEMultivariate`, due to its FFT-based
implementation. It should be preferred for univariate, continuous data.
`KDEMultivariate` also supports mixed data.
See Also
--------
KDEMultivariate
kdensity, kdensityfft
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> nobs = 300
>>> np.random.seed(1234) # Seed random generator
>>> dens = sm.nonparametric.KDEUnivariate(np.random.normal(size=nobs))
>>> dens.fit()
>>> plt.plot(dens.cdf)
>>> plt.show()
"""
def __init__(self, endog):
self.endog = np.asarray(endog)
def fit(self, kernel="gau", bw="normal_reference", fft=True, weights=None,
gridsize=None, adjust=1, cut=3, clip=(-np.inf, np.inf)):
"""
Attach the density estimate to the KDEUnivariate class.
Parameters
----------
kernel : str
The Kernel to be used. Choices are:
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
The bandwidth to use. Choices are:
- "scott" - 1.059 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "silverman" - .9 * A * nobs ** (-1/5.), where A is
`min(std(X),IQR/1.34)`
- "normal_reference" - C * A * nobs ** (-1/5.), where C is
calculated from the kernel. Equivalent (up to 2 dp) to the
"scott" bandwidth for gaussian kernels. See bandwidths.py
- If a float is given, it is the bandwidth.
fft : bool
Whether or not to use FFT. FFT implementation is more
computationally efficient. However, only the Gaussian kernel
is implemented. If FFT is False, then a 'nobs' x 'gridsize'
intermediate array is created.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
cut : float
Defines the length of the grid past the lowest and highest values
of X so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
"""
try:
bw = float(bw)
self.bw_method = "user-given"
except:
self.bw_method = bw
endog = self.endog
if fft:
if kernel != "gau":
msg = "Only gaussian kernel is available for fft"
raise NotImplementedError(msg)
if weights is not None:
msg = "Weights are not implemented for fft"
raise NotImplementedError(msg)
density, grid, bw = kdensityfft(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
else:
density, grid, bw = kdensity(endog, kernel=kernel, bw=bw,
adjust=adjust, weights=weights, gridsize=gridsize,
clip=clip, cut=cut)
self.density = density
self.support = grid
self.bw = bw
self.kernel = kernel_switch[kernel](h=bw) # we instantiate twice,
        # should this be passed to funcs?
# put here to ensure empty cache after re-fit with new options
self.kernel.weights = weights
if weights is not None:
self.kernel.weights /= weights.sum()
self._cache = resettable_cache()
@cache_readonly
def cdf(self):
"""
Returns the cumulative distribution function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
density = self.density
kern = self.kernel
if kern.domain is None: # TODO: test for grid point at domain bound
a,b = -np.inf,np.inf
else:
a,b = kern.domain
func = lambda x,s: kern.density(s,x)
support = self.support
support = np.r_[a,support]
gridsize = len(support)
endog = self.endog
probs = [integrate.quad(func, support[i-1], support[i],
args=endog)[0] for i in range(1,gridsize)]
return np.cumsum(probs)
@cache_readonly
def cumhazard(self):
"""
Returns the hazard function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return -np.log(self.sf)
@cache_readonly
def sf(self):
"""
Returns the survival function evaluated at the support.
Notes
-----
Will not work if fit has not been called.
"""
_checkisfit(self)
return 1 - self.cdf
@cache_readonly
def entropy(self):
"""
Returns the differential entropy evaluated at the support
Notes
-----
Will not work if fit has not been called. 1e-12 is added to each
probability to ensure that log(0) is not called.
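        The estimate is the plug-in value of the integral of -f(x)*log(f(x)) over
        the kernel's domain (the whole real line if the domain is unbounded).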
"""
_checkisfit(self)
def entr(x,s):
pdf = kern.density(s,x)
return pdf*np.log(pdf+1e-12)
pdf = self.density
kern = self.kernel
if kern.domain is not None:
a,b = self.domain
else:
a,b = -np.inf,np.inf
endog = self.endog
#TODO: below could run into integr problems, cf. stats.dist._entropy
return -integrate.quad(entr, a,b, args=(endog,))[0]
@cache_readonly
def icdf(self):
"""
Inverse Cumulative Distribution (Quantile) Function
Notes
-----
Will not work if fit has not been called. Uses
`scipy.stats.mstats.mquantiles`.
"""
_checkisfit(self)
gridsize = len(self.density)
return stats.mstats.mquantiles(self.endog, np.linspace(0,1,
gridsize))
def evaluate(self, point):
"""
Evaluate density at a single point.
Parameters
----------
point : float
Point at which to evaluate the density.
"""
_checkisfit(self)
return self.kernel.density(self.endog, point)
#### Kernel Density Estimator Functions ####
def kdensity(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator.
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
The Kernel to be used. Choices are
- "biw" for biweight
- "cos" for cosine
- "epa" for Epanechnikov
- "gau" for Gaussian.
- "tri" for triangular
- "triw" for triweight
- "uni" for uniform
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
If gridsize is None, max(len(X), 50) is used.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{min(X) or max(X)}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Creates an intermediate (`gridsize` x `nobs`) array. Use FFT for a more
computationally efficient version.
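    Examples
    --------
    A minimal sketch with synthetic data (illustrative only):
    >>> import numpy as np
    >>> from statsmodels.nonparametric.kde import kdensity
    >>> np.random.seed(12345)
    >>> x = np.random.randn(200)
    >>> dens, grid, bw = kdensity(x, kernel="gau", bw="silverman")
    >>> len(dens) == len(grid)
    True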
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:,None]
clip_x = np.logical_and(X>clip[0], X<clip[1])
X = X[clip_x]
nobs = float(len(X)) # after trim
if gridsize == None:
gridsize = max(nobs,50) # don't need to resize if no FFT
# handle weights
if weights is None:
weights = np.ones(nobs)
q = nobs
else:
# ensure weights is a numpy array
weights = np.asarray(weights)
if len(weights) != len(clip_x):
msg = "The length of the weights must be the same as the given X."
raise ValueError(msg)
weights = weights[clip_x.squeeze()]
q = weights.sum()
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
# if bw is None, select optimal bandwidth for kernel
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern)
bw *= adjust
a = np.min(X,axis=0) - cut*bw
b = np.max(X,axis=0) + cut*bw
grid = np.linspace(a, b, gridsize)
k = (X.T - grid[:,None])/bw # uses broadcasting to make a gridsize x nobs
# set kernel bandwidth
kern.seth(bw)
# truncate to domain
if kern.domain is not None: # won't work for piecewise kernels like parzen
z_lo, z_high = kern.domain
domain_mask = (k < z_lo) | (k > z_high)
k = kern(k) # estimate density
k[domain_mask] = 0
else:
k = kern(k) # estimate density
k[k<0] = 0 # get rid of any negative values, do we need this?
dens = np.dot(k,weights)/(q*bw)
if retgrid:
return dens, grid, bw
else:
return dens, bw
def kdensityfft(X, kernel="gau", bw="normal_reference", weights=None, gridsize=None,
adjust=1, clip=(-np.inf,np.inf), cut=3, retgrid=True):
"""
Rosenblatt-Parzen univariate kernel density estimator
Parameters
----------
X : array-like
The variable for which the density estimate is desired.
kernel : str
ONLY GAUSSIAN IS CURRENTLY IMPLEMENTED.
"bi" for biweight
"cos" for cosine
"epa" for Epanechnikov, default
"epa2" for alternative Epanechnikov
"gau" for Gaussian.
"par" for Parzen
"rect" for rectangular
"tri" for triangular
bw : str, float
"scott" - 1.059 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
"silverman" - .9 * A * nobs ** (-1/5.), where A is min(std(X),IQR/1.34)
If a float is given, it is the bandwidth.
weights : array or None
WEIGHTS ARE NOT CURRENTLY IMPLEMENTED.
Optional weights. If the X value is clipped, then this weight is
also dropped.
gridsize : int
        If gridsize is None, max(len(X), 512) is used. Note that the provided
number is rounded up to the next highest power of 2.
adjust : float
An adjustment factor for the bw. Bandwidth becomes bw * adjust.
clip : tuple
Observations in X that are outside of the range given by clip are
dropped. The number of observations in X is then shortened.
cut : float
Defines the length of the grid past the lowest and highest values of X
so that the kernel goes to zero. The end points are
-/+ cut*bw*{X.min() or X.max()}
retgrid : bool
Whether or not to return the grid over which the density is estimated.
Returns
-------
density : array
The densities estimated at the grid points.
grid : array, optional
The grid points at which the density is estimated.
Notes
-----
Only the default kernel is implemented. Weights aren't implemented yet.
This follows Silverman (1982) with changes suggested by Jones and Lotwick
(1984). However, the discretization step is replaced by linear binning
of Fan and Marron (1994). This should be extended to accept the parts
that are dependent only on the data to speed things up for
cross-validation.
References
---------- ::
Fan, J. and J.S. Marron. (1994) `Fast implementations of nonparametric
curve estimators`. Journal of Computational and Graphical Statistics.
3.1, 35-56.
Jones, M.C. and H.W. Lotwick. (1984) `Remark AS R50: A Remark on Algorithm
    AS 176. Kernel Density Estimation Using the Fast Fourier Transform`.
Journal of the Royal Statistical Society. Series C. 33.1, 120-2.
Silverman, B.W. (1982) `Algorithm AS 176. Kernel density estimation using
the Fast Fourier Transform. Journal of the Royal Statistical Society.
Series C. 31.2, 93-9.
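    Examples
    --------
    A minimal sketch with synthetic data (illustrative only); only the
    Gaussian kernel is supported here:
    >>> import numpy as np
    >>> from statsmodels.nonparametric.kde import kdensityfft
    >>> np.random.seed(12345)
    >>> x = np.random.randn(500)
    >>> dens, grid, bw = kdensityfft(x, kernel="gau", bw="silverman")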
"""
X = np.asarray(X)
X = X[np.logical_and(X>clip[0], X<clip[1])] # won't work for two columns.
# will affect underlying data?
# Get kernel object corresponding to selection
kern = kernel_switch[kernel]()
try:
bw = float(bw)
except:
bw = bandwidths.select_bandwidth(X, bw, kern) # will cross-val fit this pattern?
bw *= adjust
nobs = float(len(X)) # after trim
# 1 Make grid and discretize the data
if gridsize == None:
gridsize = np.max((nobs,512.))
gridsize = 2**np.ceil(np.log2(gridsize)) # round to next power of 2
a = np.min(X)-cut*bw
b = np.max(X)+cut*bw
grid,delta = np.linspace(a,b,gridsize,retstep=True)
RANGE = b-a
#TODO: Fix this?
# This is the Silverman binning function, but I believe it's buggy (SS)
# weighting according to Silverman
# count = counts(X,grid)
# binned = np.zeros_like(grid) #xi_{k} in Silverman
# j = 0
# for k in range(int(gridsize-1)):
# if count[k]>0: # there are points of X in the grid here
# Xingrid = X[j:j+count[k]] # get all these points
# # get weights at grid[k],grid[k+1]
# binned[k] += np.sum(grid[k+1]-Xingrid)
# binned[k+1] += np.sum(Xingrid-grid[k])
# j += count[k]
# binned /= (nobs)*delta**2 # normalize binned to sum to 1/delta
#NOTE: THE ABOVE IS WRONG, JUST TRY WITH LINEAR BINNING
binned = fast_linbin(X,a,b,gridsize)/(delta*nobs)
# step 2 compute FFT of the weights, using Munro (1976) FFT convention
y = forrt(binned)
# step 3 and 4 for optimal bw compute zstar and the density estimate f
# don't have to redo the above if just changing bw, ie., for cross val
#NOTE: silverman_transform is the closed form solution of the FFT of the
#gaussian kernel. Not yet sure how to generalize it.
zstar = silverman_transform(bw, gridsize, RANGE)*y # 3.49 in Silverman
# 3.50 w Gaussian kernel
f = revrt(zstar)
if retgrid:
return f, grid, bw
else:
return f, bw
if __name__ == "__main__":
import numpy as np
np.random.seed(12345)
xi = np.random.randn(100)
f,grid, bw1 = kdensity(xi, kernel="gau", bw=.372735, retgrid=True)
f2, bw2 = kdensityfft(xi, kernel="gau", bw="silverman",retgrid=False)
# do some checking vs. silverman algo.
# you need denes.f, http://lib.stat.cmu.edu/apstat/176
#NOTE: I (SS) made some changes to the Fortran
# and the FFT stuff from Munro http://lib.stat.cmu.edu/apstat/97o
# then compile everything and link to denest with f2py
#Make pyf file as usual, then compile shared object
#f2py denest.f -m denest2 -h denest.pyf
#edit pyf
#-c flag makes it available to other programs, fPIC builds a shared library
#/usr/bin/gfortran -Wall -c -fPIC fft.f
#f2py -c denest.pyf ./fft.o denest.f
try:
from denest2 import denest # @UnresolvedImport
a = -3.4884382032045504
b = 4.3671504686785605
RANGE = b - a
bw = bandwidths.bw_silverman(xi)
ft,smooth,ifault,weights,smooth1 = denest(xi,a,b,bw,np.zeros(512),np.zeros(512),0,
np.zeros(512), np.zeros(512))
# We use a different binning algo, so only accurate up to 3 decimal places
np.testing.assert_almost_equal(f2, smooth, 3)
#NOTE: for debugging
# y2 = forrt(weights)
# RJ = np.arange(512/2+1)
# FAC1 = 2*(np.pi*bw/RANGE)**2
# RJFAC = RJ**2*FAC1
# BC = 1 - RJFAC/(6*(bw/((b-a)/M))**2)
# FAC = np.exp(-RJFAC)/BC
# SMOOTH = np.r_[FAC,FAC[1:-1]] * y2
# dens = revrt(SMOOTH)
except:
# ft = np.loadtxt('./ft_silver.csv')
# smooth = np.loadtxt('./smooth_silver.csv')
print("Didn't get the estimates from the Silverman algorithm")
| bsd-3-clause |
alexsavio/scikit-learn | examples/gaussian_process/plot_gpc_isoprobability.py | 64 | 3049 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Iso-probability lines for Gaussian Processes classification (GPC)
=================================================================
A two-dimensional classification example showing iso-probability lines for
the predicted probabilities.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Adapted to GaussianProcessClassifier:
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import DotProduct, ConstantKernel as C
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = np.array(g(X) > 0, dtype=int)
# Instantiate and fit Gaussian Process Model
kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2
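# A constant-scaled, squared dot-product kernel, i.e. a degree-2 polynomial
# kernel: a natural choice here since the boundary g(x) = 0 is quadratic in x[0].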
gp = GaussianProcessClassifier(kernel=kernel)
gp.fit(X, y)
print("Learned kernel: %s " % gp.kernel_)
# Evaluate real function and the predicted probability
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_prob = gp.predict_proba(xx)[:, 1]
y_true = y_true.reshape((res, res))
y_prob = y_prob.reshape((res, res))
# Plot the probabilistic classification iso-values
fig = plt.figure(1)
ax = fig.gca()
ax.axes.set_aspect('equal')
plt.xticks([])
plt.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8,
extent=(-lim, lim, -lim, lim))
norm = plt.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = plt.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
plt.clim(0, 1)
plt.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
plt.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = plt.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = plt.contour(x1, x2, y_prob, [0.666], colors='b',
linestyles='solid')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.5], colors='k',
linestyles='dashed')
plt.clabel(cs, fontsize=11)
cs = plt.contour(x1, x2, y_prob, [0.334], colors='r',
linestyles='solid')
plt.clabel(cs, fontsize=11)
plt.show()
| bsd-3-clause |
akshaykr/oracle_cb | semibandits/plotting_script.py | 1 | 5733 | import pickle
import matplotlib.pyplot as plt
import matplotlib.patches
import matplotlib as mpl
import numpy as np
import sys, argparse
sys.path.append("../")
import Plotting
Names = {
'mini_gb2': 'VC-GB2',
'mini_gb5': 'VC-GB5',
'mini_lin': 'VC-Lin',
'epsall_gb2': '$\epsilon$-GB2',
'epsall_gb5': '$\epsilon$-GB5',
'epsall_lin': '$\epsilon$-Lin',
'lin': 'LinUCB'
}
Styles = {
'mini_gb2': ['k', 'solid'],
'mini_gb5': ['r', 'solid'],
'mini_lin': ['g', 'solid'],
'epsall_gb2': ['k', 'dashed'],
'epsall_gb5': ['r', 'dashed'],
'epsall_lin': ['g', 'dashed'],
'lin': ['b', 'solid']
}
parser = argparse.ArgumentParser()
parser.add_argument('--save', dest='save', action='store_true')
Args = parser.parse_args(sys.argv[1:])
D1 = Plotting.read_dir("../results/mslr30k_T=36000_L=3_e=0.1/")
D2 = Plotting.read_dir("../results/yahoo_T=40000_L=2_e=0.5/")
print(mpl.rcParams['figure.figsize'])
fig = plt.figure(figsize=(mpl.rcParams['figure.figsize'][0]*2, mpl.rcParams['figure.figsize'][1]-1))
ax = fig.add_subplot(111,frameon=False)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
std = True
legendHandles = []
keys = ['epsall_lin', 'mini_lin', 'epsall_gb2', 'mini_gb2', 'epsall_gb5', 'mini_gb5', 'lin']
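# For each algorithm key, the loop below collects every hyperparameter setting
# found in the results, converts cumulative reward into average reward per
# round, and plots the pointwise-best setting at each time point (mini_gb5 is
# pinned to its 0.008 setting instead).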
for k in keys:
params = []
mus = []
stds = []
for (k1,v1) in D1[0].items():
if k1.find(k) == 0 and len(D1[0][k1]) != 0:
x = np.arange(100, 10*len(D1[0][k1][0])+1, 100)
mus.append(np.mean(D1[0][k1],axis=0)[9::10]/x)
stds.append(2/np.sqrt(len(D1[0][k1]))*(np.std(D1[0][k1],axis=0)[9::10]/x))
params.append(k1.split("_")[-1])
if len(mus) == 0:
continue
A = np.vstack(mus)
B = np.vstack(stds)
ids = np.argmax(A, axis=0)
mu = np.array([A[ids[i], i] for i in range(len(ids))])
stdev = np.array([B[ids[i], i] for i in range(len(ids))])
if k == 'mini_gb5':
mu = np.mean(D1[0]['mini_gb5_0.008'], axis=0)[9::10]/x
stdev = 2/np.sqrt(len(D1[0]['mini_gb5_0.008']))*(np.std(D1[0]['mini_gb5_0.008'], axis=0)[9::10]/x)
l1 = ax1.plot(x,mu,rasterized=True, linewidth=2.0, label=Names[k], color=Styles[k][0], linestyle=Styles[k][1])
legendHandles.append((matplotlib.patches.Patch(color=l1[0].get_color(), label=Names[k]), Names[k]))
    if std and (k=='mini_gb5' or k=='lin'):
ax1.fill_between(x,
mu - stdev,
mu + stdev,
color = l1[0].get_color(), alpha=0.2, rasterized = True)
for k in keys:
params = []
mus = []
stds = []
for (k1,v1) in D2[0].items():
if k1.find(k) == 0 and len(D2[0][k1]) != 0:
x = np.arange(100, 10*len(D2[0][k1][0])+1, 100)
mus.append(np.mean(D2[0][k1],axis=0)[9::10]/x)
stds.append(2/np.sqrt(len(D2[0][k1]))*(np.std(D2[0][k1],axis=0)[9::10]/x))
params.append(k1.split("_")[-1])
if len(mus) == 0:
continue
A = np.vstack(mus)
B = np.vstack(stds)
ids = np.argmax(A, axis=0)
mu = np.array([A[ids[i], i] for i in range(len(ids))])
stdev = np.array([B[ids[i], i] for i in range(len(ids))])
if k == 'mini_gb5':
mu = np.mean(D2[0]['mini_gb5_0.008'], axis=0)[9::10]/x
stdev = 2/np.sqrt(len(D2[0]['mini_gb5_0.008']))*(np.std(D2[0]['mini_gb5_0.008'], axis=0)[9::10]/x)
l1 = ax2.plot(x,mu,rasterized=True, linewidth=2.0, label=Names[k], color=Styles[k][0], linestyle=Styles[k][1])
    if std and (k=='mini_gb5' or k=='lin'):
ax2.fill_between(x,
mu - stdev,
mu + stdev,
color = l1[0].get_color(), alpha=0.2, rasterized = True)
plt.rc('font', size=18)
plt.rcParams['text.usetex'] = True
plt.rc('font', family='sans-serif')
## Ax1 is MSLR
ticks=ax1.get_yticks()
print(ticks)
ax1.set_ylim(2.15, 2.35)
print("Setting ylim to %0.2f, %0.2f" % (ticks[3], ticks[len(ticks)-2]))
ticks = ax1.get_yticks()
print(ticks)
ticks = ["", "", "2.2", "", "2.3", ""]
ax1.set_yticklabels(ticks,size=20)
ticks = ['', '', '10000', '', '20000', '', '30000']
ax1.set_xlim(1000, 31000)
ax1.set_xticklabels(ticks,size=20)
# Ax2 is Yahoo!
ticks=ax2.get_yticks()
print(ticks)
ax2.set_ylim(2.90,3.12)
print("Setting ylim to %0.2f, %0.2f" % (ticks[3], 3.15))
ticks=ax2.get_yticks()
print(ticks)
ticks = ["", "2.9", "", "3.0", "", "3.1"]
ax2.set_yticklabels(ticks,size=20)
ticks = ['', '', '10000', '', '20000', '', '30000']
ax2.set_xlim(1000, 32000)
ax2.set_xticklabels(ticks,size=20)
plt.sca(ax)
plt.ylabel('Average reward')
plt.xlabel('Number of interactions (T)')
leg = ax2.legend([x[1] for x in legendHandles], loc='upper center', bbox_to_anchor=(-0.1, -0.15), fancybox=False, shadow=False, ncol=7, frameon=False,fontsize=18)
for legobj in leg.legendHandles:
legobj.set_linewidth(4.0)
plt.sca(ax1)
tt1 = plt.title('Dataset: MSLR',fontsize=18)
tt1.set_position([0.5, 1.02])
plt.sca(ax2)
tt2 = plt.title('Dataset: Yahoo!',fontsize=18)
tt2.set_position([0.5, 1.02])
plt.gcf().subplots_adjust(bottom=0.25)
if Args.save:
plt.savefig("../figs/plots_grouped.png", format='png', dpi=100, bbox_inches='tight')
plt.savefig("../figs/plots_grouped.pdf", format='pdf', dpi=100, bbox_inches='tight')
else:
plt.show()
## (DONE) No band
## (DONE) markers + update legend
## (DONE) No legend frame
## (DONE) font is too big
## space between title and plot
## space between ylabel and yticks
## Get P-values (paired ttest and regular ttest)
| mit |
LamaHamadeh/Microsoft-DAT210x | Module-4/assignment2.py | 1 | 3877 | '''
author Lama Hamadeh
'''
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import assignment2_helper as helper
# Look pretty...
matplotlib.style.use('ggplot')
# Do * NOT * alter this line, until instructed!
scaleFeatures = True # Feature scaling (if False, no scaling is applied, which affects the 2D plot and the variance values)
# TODO: Load up the dataset and remove any and all
# Rows that have a nan. You should be a pro at this
# by now ;-)
#
# .. your code here ..
df=pd.read_csv('/Users/ADB3HAMADL/Desktop/Anaconda_Packages/DAT210x-master/Module4/Datasets/kidney_disease.csv',index_col = 0)
df = df.reset_index(drop=True) #remove the index column
df=df.dropna(axis=0) #remove any and all Rows that have a nan
#print(df)
# Create some color coded labels; the actual label feature
# will be removed prior to executing PCA, since it's unsupervised.
# You're only labeling by color so you can see the effects of PCA
labels = ['red' if i=='ckd' else 'green' for i in df.classification]
# TODO: Use an indexer to select only the following columns:
# ['bgr','wc','rc']
#
# .. your code here ..
df=df[['bgr', 'rc','wc']] #select only the following columns: bgr, rc, and wc
# TODO: Print out and check your dataframe's dtypes. You'll probably
# want to call 'exit()' after you print it out so you can stop the
# program's execution.
#
# You can either take a look at the dataset webpage in the attribute info
# section: https://archive.ics.uci.edu/ml/datasets/Chronic_Kidney_Disease
# or you can actually peek through the dataframe by printing a few rows.
# What kind of data type should these three columns be? If Pandas didn't
# properly detect and convert them to that data type for you, then use
# an appropriate command to coerce these features into the right type.
#
# .. your code here ..
print(df.dtypes) # bgr, wc and rc should all be numeric types
df.rc = pd.to_numeric(df.rc, errors='coerce') # coerce to numbers; invalid entries become NaN
df.wc = pd.to_numeric(df.wc, errors='coerce')
# TODO: PCA Operates based on variance. The variable with the greatest
# variance will dominate. Go ahead and peek into your data using a
# command that will check the variance of every feature in your dataset.
# Print out the results. Also print out the results of running .describe
# on your dataset.
#
# Hint: If you don't see all three variables: 'bgr','wc' and 'rc', then
# you probably didn't complete the previous step properly.
#
# .. your code here ..
print(df.var()) # per-feature variance (PCA is dominated by the largest-variance feature)
print(df.describe()) # summary statistics
# TODO: This method assumes your dataframe is called df. If it isn't,
# make the appropriate changes. Don't alter the code in scaleFeatures()
# just yet though!
#
# .. your code adjustment here ..
if scaleFeatures: df = helper.scaleFeatures(df)
# TODO: Run PCA on your dataset and reduce it to 2 components
# Ensure your PCA instance is saved in a variable called 'pca',
# and that the results of your transformation are saved in 'T'.
#
# .. your code here ..
from sklearn import decomposition
pca = decomposition.PCA(n_components=2)
pca.fit(df)
decomposition.PCA(copy=True, n_components=2, whiten=False)
T= pca.transform(df)
# Plot the transformed data as a scatter plot. Recall that transforming
# the data will result in a NumPy NDArray. You can either use MatPlotLib
# to graph it directly, or you can convert it to DataFrame and have pandas
# do it for you.
#
# Since we've already demonstrated how to plot directly with MatPlotLib in
# Module4/assignment1.py, this time we'll convert to a Pandas Dataframe.
#
# Since we transformed via PCA, we no longer have column names. We know we
# are in P.C. space, so we'll just define the coordinates accordingly:
ax = helper.drawVectors(T, pca.components_, df.columns.values, plt, scaleFeatures)
T = pd.DataFrame(T)
T.columns = ['component1', 'component2']
T.plot.scatter(x='component1', y='component2', marker='o', c=labels, alpha=0.75, ax=ax)
plt.show()
| mit |
sunyihuan326/DeltaLab | shuwei_fengge/practice_one/MNIST/kaggle_mnist.py | 1 | 7890 | # coding:utf-8
'''
Created on 2017/11/16.
@author: chk01
'''
import pandas as pd
import numpy as np
import os
import tensorflow as tf
from tensorflow.python.framework import ops
import matplotlib.pyplot as plt
import scipy.io as scio
import math
def convert_to_one_hot(Y, C):
Y = np.eye(C)[Y.reshape(-1)].T
return Y
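# e.g. (illustrative):
#   convert_to_one_hot(np.array([[1, 0, 2]]), 3)
#   -> array([[0., 1., 0.],
#             [1., 0., 0.],
#             [0., 0., 1.]])  # shape (C, m): one one-hot column per label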
def init_sets(X, Y, file, distribute):
m = X.shape[1]
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
assert len(distribute) == 2
assert sum(distribute) == 1
scio.savemat(file + '_train',
{'X': shuffled_X[:, :int(m * distribute[0])], 'Y': shuffled_Y[:, :int(m * distribute[0])]})
scio.savemat(file + '_test',
{'X': shuffled_X[:, int(m * distribute[0]):], 'Y': shuffled_Y[:, int(m * distribute[0]):]})
return True
def load_data(x, y, file):
init_sets(x, y, file, distribute=[0.8, 0.2])
return True
def initialize_parameters_deep(layer_dims):
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = tf.get_variable(dtype=tf.float32, name='W' + str(l),
shape=(layer_dims[l], layer_dims[l - 1]),
initializer=tf.contrib.layers.xavier_initializer())
parameters['b' + str(l)] = tf.get_variable(dtype=tf.float32, name='b' + str(l),
shape=(layer_dims[l], 1),
initializer=tf.contrib.layers.xavier_initializer())
assert (parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l - 1]))
assert (parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
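# e.g. (illustrative): initialize_parameters_deep([784, 64, 10]) creates
# W1:(64, 784), b1:(64, 1), W2:(10, 64), b2:(10, 1) as trainable variables
# with Xavier initialization.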
def create_placeholders(n_x, n_y):
X = tf.placeholder(name='X', shape=(n_x, None), dtype=tf.float32)
Y = tf.placeholder(name='Y', shape=(n_y, None), dtype=tf.float32)
return X, Y
def forward_propagation(X, parameters):
A = X
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
W = parameters['W' + str(l)]
b = parameters['b' + str(l)]
A = tf.nn.relu(tf.add(tf.matmul(W, A_prev), b))
# A = tf.nn.dropout(A, 0.9)
# 94
ZL = tf.add(tf.matmul(parameters['W' + str(L)], A), parameters['b' + str(L)])
return ZL
def compute_cost(ZL, Y):
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=tf.transpose(ZL), labels=tf.transpose(Y)))
return cost
def cost_fig(costs, learning_rate):
costs = np.squeeze(costs)
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per hundreds)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return True
def random_mini_batches(X, Y, mini_batch_size=64):
m = X.shape[1]
mini_batches = []
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation]
num_complete_minibatches = math.floor(m / mini_batch_size)
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size: k * mini_batch_size + mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: m]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: m]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
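# e.g. (illustrative): with X of shape (784, m) and Y of shape (10, m),
# random_mini_batches(X, Y, 64) returns a shuffled list of (X_batch, Y_batch)
# tuples whose batches hold 64 columns each (the last batch possibly fewer).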
def data_check(data):
res = list(np.argmax(data.T, 1))
num = len(res)
classes = data.shape[0]
for i in range(classes):
        print('class ' + str(i) + ' ratio:', round(100.0 * res.count(i) / num, 2), '%')
    print('<------------------separator---------------------->')
def model(X_train, Y_train, X_test, Y_test, layer_dims, epochs=2000, minibatch_size=64, learning_rate=0.5,
print_cost=True):
ops.reset_default_graph()
n_x, m = X_train.shape
n_y = Y_train.shape[0]
costs = []
X, Y = create_placeholders(n_x, n_y)
parameters = initialize_parameters_deep(layer_dims)
# layer_dims = [5, 4, 3]
ZL = forward_propagation(X, parameters)
cost = compute_cost(ZL, Y)
# cost = compute_cost(Z1, Y) + tf.contrib.layers.l1_regularizer(.2)(parameters['W1'])
# optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.99).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for epoch in range(epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size)
minibatches = random_mini_batches(X_train, Y_train, minibatch_size)
for minibatch in minibatches:
(minibatch_X, minibatch_Y) = minibatch
zl, par, _, temp_cost = sess.run([ZL, parameters, optimizer, cost],
feed_dict={X: minibatch_X, Y: minibatch_Y})
minibatch_cost += temp_cost / num_minibatches
if print_cost and epoch % 5 == 0:
print("Cost after epoch %i: %f" % (epoch, temp_cost))
if print_cost and epoch % 1 == 0:
costs.append(minibatch_cost)
cost_fig(costs, learning_rate)
predict_op = tf.argmax(tf.transpose(ZL), 1)
correct_prediction = tf.equal(predict_op, tf.argmax(tf.transpose(Y), 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return par
def predict(X, parameters):
L = len(parameters) // 2
params = {}
for l in range(1, L):
params['W' + str(l)] = tf.convert_to_tensor(parameters['W' + str(l)])
params['b' + str(l)] = tf.convert_to_tensor(parameters['b' + str(l)])
x = tf.placeholder("float", [784, None])
z3 = forward_propagation(x, params)
p = tf.argmax(tf.transpose(z3), 1)
sess = tf.Session()
prediction = sess.run(p, feed_dict={x: X})
result = pd.read_csv('C:/Users/chk01/Desktop/sample_submission.csv')
result['Label'] = prediction
result.to_csv('result.csv')
return prediction
if __name__ == '__main__':
file = 'kaggle_mnist'
# parameters = scio.loadmat(file + '_parameter')
# result = pd.read_csv("C:/Users/chk01/Desktop/test.csv")
# result_X = result.values.T[0:]
# print(result_X.shape)
# predict(result_X, parameters)
# assert 1 == 0
train = pd.read_csv("C:/Users/chk01/Desktop/train.csv")
train_X = train.values.T[1:]
train_Y = train['label'].values.reshape(1, -1)
train_Y = convert_to_one_hot(train_Y, 10)
load_data(train_X, train_Y, 'kaggle_mnist')
data_train = scio.loadmat(file + '_train')
X_train = data_train['X']
Y_train = data_train['Y']
# print(X_train.shape)
# (784, 16000)
# print(Y_train.shape)
data_test = scio.loadmat(file + '_test')
X_test = data_test['X']
Y_test = data_test['Y']
layer_dims = [784, 10]
data_check(Y_test)
data_check(Y_train)
parameters = model(X_train, Y_train, X_test, Y_test, layer_dims, epochs=200, learning_rate=0.001)
scio.savemat(file + '_parameter', parameters)
| mit |
adpozuelo/Master | RC/PEC2/ws.py | 1 | 2665 | ## RC - UOC - URV - PEC2
## [email protected]
## Watts-Strogatz (WS)
## run with 'python3 ws.py'
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import random
import math
random.seed(1)
def create_network(n, p, k):
G = nx.Graph()
kd2 = int(k / 2) + 1
for ni in range(1, n + 1):
G.add_node(ni)
for nj in range(ni + 1, ni + kd2):
if nj > n:
nj %= n
G.add_edge(ni, nj)
for ni in range(1, n + 1):
for nj in range(ni + 1, ni + kd2):
if random.uniform(0, 1) < p:
if nj > n:
nj %= n
G.remove_edge(ni, nj)
rn = random.randint(1, n)
while G.has_edge(ni, rn) or rn == ni:
rn = random.randint(1, n)
G.add_edge(ni, rn)
if n >= 1000:
nx.draw_networkx(G, node_size=4, with_labels=False)
else:
nx.draw_networkx(G, nx.circular_layout(G), node_size=4, with_labels=False)
plt.title('n = ' + str(n) + ', p = ' + str(p) + ', k = ' + str(k))
filename = 'ws_n' + str(n) + '_p' + str(p) + '_k' + str(k) + '_net.png'
plt.savefig(filename)
#plt.show()
plt.clf()
histo = nx.degree_histogram(G)
total = sum(histo)
norm_histo = np.divide(histo, total)
length = len(norm_histo)
kn = np.arange(length)
plt.plot(kn, norm_histo, 'r-', label = 'empirical')
kd2 -= 1
diracdelta = np.empty(length)
# https://en.wikipedia.org/wiki/Watts%E2%80%93Strogatz_model
for ki in range(0, length):
if ki >= kd2:
sumatory = np.empty(min(ki - kd2, kd2) + 1)
for ndi in range(0, len(sumatory)):
sumatory[ndi] = (math.factorial(kd2) / (math.factorial(ndi) * math.factorial( kd2 - ndi))) * ((1 - p) ** ndi) * (p ** (kd2 - ndi)) * ((p * kd2) ** (ki - kd2 - ndi)) * math.exp(-p * kd2) / math.factorial(ki - kd2 - ndi)
diracdelta[ki] = sum(sumatory)
else:
diracdelta[ki] = 0.0
plt.plot(kn, diracdelta, 'b-', label = 'dirac delta')
plt.title('n = ' + str(n) + ', p = ' + str(p) + ', k = ' + str(k))
    plt.xlabel('Degree k')
    plt.ylabel('Fraction of nodes')
plt.legend(loc = 1)
filename = 'ws_n' + str(n) + '_p' + str(p) + '_k' + str(k) + '_dg.png'
plt.savefig(filename)
#plt.show()
plt.clf()
return
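# Illustrative sketch (not part of the original script): the same closed-form
# WS degree distribution as used above, for a single degree ki, so the nested
# sums can be sanity-checked in isolation.
def ws_degree_prob(ki, k, p):
    kd2 = k // 2
    if ki < kd2:
        return 0.0
    total = 0.0
    for ndi in range(0, min(ki - kd2, kd2) + 1):
        total += (
            (math.factorial(kd2) / (math.factorial(ndi) * math.factorial(kd2 - ndi)))
            * ((1 - p) ** ndi) * (p ** (kd2 - ndi))
            * ((p * kd2) ** (ki - kd2 - ndi))
            * math.exp(-p * kd2) / math.factorial(ki - kd2 - ndi)
        )
    return total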
n = [50, 100, 1000, 10000]
p = [0.0, 0.1, 0.2, 0.5, 0.9, 1.0]
k = [4, 8, 16, 24]
for ni in n:
for pi in p:
for ki in k:
create_network(ni, pi, ki)
| gpl-3.0 |
kernc/scikit-learn | sklearn/kernel_ridge.py | 31 | 6552 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
| bsd-3-clause |
eWaterCycle/ewatercycle | tests/test_analysis.py | 1 | 1062 | from ewatercycle.analysis import hydrograph
from matplotlib.testing.decorators import image_comparison
import pandas as pd
import numpy as np
@image_comparison(
baseline_images=['hydrograph'],
remove_text=True,
extensions=['png'],
savefig_kwarg={'bbox_inches':'tight'},
)
def test_hydrograph():
ntime = 300
dti = pd.date_range("2018-01-01", periods=ntime, freq="d")
np.random.seed(20210416)
discharge = {
'discharge_a': pd.Series(np.linspace(0, 2, ntime), index=dti),
'discharge_b': pd.Series(3*np.random.random(ntime)**2, index=dti),
'discharge_c': pd.Series(2*np.random.random(ntime)**2, index=dti),
'reference': pd.Series(np.random.random(ntime)**2, index=dti),
}
df = pd.DataFrame(discharge)
precipitation = {
'precipitation_a': pd.Series(np.random.random(ntime)/20, index=dti),
'precipitation_b': pd.Series(np.random.random(ntime)/30, index=dti),
}
df_pr = pd.DataFrame(precipitation)
hydrograph(df, reference='reference', precipitation=df_pr)
| apache-2.0 |
eglxiang/Med | tempSlabCheck.py | 1 | 16337 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 3 18:40:31 2016
@author: apezeshk
"""
import numpy as np
from matplotlib import pyplot as plt
import os
import h5py
import json
import SupportFuncs
import theano.tensor as T
from lasagne.layers import dnn
import lasagne
import theano
import glob
############################################
############################################
def Build_3dcnn(init_norm=lasagne.init.Normal(), inputParamsNetwork=dict(n_layer=2,shape=[10,10,10],dropout=0.1, nonLinearity=lasagne.nonlinearities.rectify,
biasInit=lasagne.init.Constant(0.0)), input_var=None):
# As a third model, we'll create a CNN of two convolution + pooling stages
# and a fully-connected hidden layer in front of the output layer.
# Input layer, as usual:
dropout = inputParamsNetwork['dropout']
# print(dropout)
network = lasagne.layers.InputLayer(shape=(None,1,int(inputParamsNetwork['shape'].split(',')[0]),int(inputParamsNetwork['shape'].split(',')[1]),int(inputParamsNetwork['shape'].split(',')[2])),
input_var=input_var)
# This time we do not apply input dropout, as it tends to work less well
# for convolutional layers.
# Convolutional layer with 32 kernels of size 5x5. Strided and padded
# convolutions are supported as well; see the docstring.
# network=lasagne.layers.dnn.Conv3DDNNLayer(network,num_filters=32,filter_size=(3,3,4),
# stride=(1, 1, 1),pad=1,
# nonlinearity=lasagne.nonlinearities.rectify,
# W=lasagne.init.GlorotUniform()
# )
# network=lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2,2,2),stride=(2,2,2))
if inputParamsNetwork['n_layer'] == 2 :
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=32, pad='same', filter_size=(5, 5, 3),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
flip_filters=False
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 1))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=32, pad='same', filter_size=(5, 5, 3),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))
else:
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=32, pad='same', filter_size=(5, 5, 3),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
flip_filters=False
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 1))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=32, pad='same', filter_size=(5, 5, 3),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))
network = lasagne.layers.dnn.Conv3DDNNLayer(network, num_filters=32, pad='same', filter_size=(3, 3, 1),
stride=(1, 1, 1),
nonlinearity=inputParamsNetwork['nonLinearity'],
W=init_norm,
b=inputParamsNetwork['biasInit'],
)
# network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 1))
# network=lasagne.layers.PadLayer(network,width=[(0,1),(0,1)], batch_ndim=3)
# Another convolution with 32 5x5 kernels, and another 2x2 pooling:
# network = lasagne.layers.dnn.MaxPool3DDNNLayer(network, pool_size=(2, 2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=dropout),
num_units=64,
nonlinearity=lasagne.nonlinearities.sigmoid)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
network = lasagne.layers.DenseLayer(
lasagne.layers.dropout(network, p=dropout),
num_units=2,
nonlinearity=lasagne.nonlinearities.softmax)
# network=lasagne.layers.DenseLayer(network, num_units=2, nonlinearity=None)
return network
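# Note: depending on inputParamsNetwork['n_layer'], the builder above yields a
# 2- or 3-conv-layer 3D CNN (32 filters per layer, with max-pooling), followed
# by a 64-unit sigmoid dense layer and a 2-way softmax output.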
############################################
############################################
############################################
if 0:
asd = np.load('/diskStation/LIDC/28288/pos_28288/p0045_20000101_s3000547_3.npy').astype('float32')
#asd = np.load('/diskStation/LIDC/28288/neg_smp_0_28288/p0045_20000101_s3000547_1.npy')
asd = (asd - asd.min())/(asd.max() - asd.min())
#plt.ion()
for i in range(0,8):
plt.figure(i) # create a new figure
plt.imshow(asd[:,:,i], cmap = 'gray')
# plt.draw() # show the figure, non-blocking
#
# _ = raw_input("Press [enter] to continue.") # wait for input from the user
# plt.close(i) # close the figure to show the next one.
for i in range(0,test_pred_full_volume_softmax0.shape[2]):
plt.figure(i) # create a new figure
plt.subplot(121)
plt.imshow(test_pred_full_volume_softmax0[:,:,i], cmap = 'gray')
noduleMaskResizeBin = noduleMaskResize>0.5
noduleMaskResizeBin = noduleMaskResizeBin.astype('int')
plt.subplot(122)
plt.imshow(noduleMaskResizeBin[:,:,i], cmap = 'gray')
else:
    #loads a pre-saved network; runs + or - examples from the test set, or those from a particular patient, against the model
########################
######Input Params######
minmaxFilePath = '/home/apezeshk/Codes/DeepMed/max_min.json' #file containing the min/max of each case; used for normalizing
#minmaxFilePath = '/diskStation/temp/new_changes/max_min.json' #file containing the min/max of each case; used for normalizing
pathSavedNetwork = '/home/apezeshk/Codes/DeepMed/models/cnn_36368_20160914121137.npz'
#pathSavedNetwork = '/diskStation/temp/cnn_36368_20160817161531.npz'
runFlag = 'pos' #'pos' to run only positive patches, 'neg' to only run negative patches
patientMode = 'p0012' #'all' to run for all patients in test set, or only +/- of specific patient (e.g. 'p0012')
testFileHdf5 = '/diskStation/temp/test_500_0.3_28288 .hdf5' #Only used if patientMode=='all'; paths to +/- test samples
masterPatchFolder = '/diskStation/LIDC/' #master folder containing extracted patches; Only used if patientMode!='all'
sardarImplementationFlag = 0
with np.load(pathSavedNetwork) as f:
param_values = [f['arr_%d' % i] for i in range(len(f.files))]
dtensor5 = T.TensorType('float32', (False,) * 5)
input_var = dtensor5('inputs')
target_var = T.ivector('targets')
########################
########################
########################
#####Network Params#####
inputParamsConfigLocal = {}
inputParamsConfigLocal['input_shape'] = '36, 36, 8'
inputParamsConfigLocal['learning_rate'] = '0.05'
inputParamsConfigLocal['momentum'] = '0.9'
inputParamsConfigLocal['num_epochs'] = '50'
inputParamsConfigLocal['batch_size'] = '100'
inputParamsConfigLocal['train_set_size'] = '60000'
inputParamsConfigLocal['test_set_size'] = '500'
inputParamsConfigLocal['positive_set_ratio'] = '0.5'
inputParamsConfigLocal['dropout'] = '0.1'
inputParamsConfigLocal['nonlinearityToUse'] = 'relu'
inputParamsConfigLocal['numberOfLayers'] = 3
inputParamsConfigLocal['augmentationFlag'] = 1
inputParamsConfigLocal['weightInitToUse'] ='He' #weight initialization; either 'normal' or 'He' (for HeNormal)
inputParamsConfigLocal['lrDecayFlag'] = 1 #1 for using learning rate decay, 0 for constant learning rate throughout training
inputParamsConfigLocal['biasInitVal'] = 0.0 #doesn't help; idea was to use bias init 1 when applying relu but it was worse!
#No need to change the following line, it will remove space/comma from input_shape and generate data_path accordingly!
inputParamsConfigLocal['data_path'] = os.path.join('/diskStation/LIDC/',
((inputParamsConfigLocal['input_shape']).replace(' ','')).replace(',',''))
######Input Params######
########################
inputParamsConfigAll = inputParamsConfigLocal
#experiment_id = str(time.strftime("%Y%m%d%H%M%S"))
input_shape = inputParamsConfigAll['input_shape']
learning_rate = inputParamsConfigAll['learning_rate']
momentum = inputParamsConfigAll['momentum']
num_epochs = inputParamsConfigAll['num_epochs']
batch_size = inputParamsConfigAll['batch_size']
data_path = inputParamsConfigAll['data_path']
train_set_size = inputParamsConfigAll['train_set_size']
test_set_size = inputParamsConfigAll['test_set_size']
positive_set_ratio = inputParamsConfigAll['positive_set_ratio']
dropout = inputParamsConfigAll['dropout']
nonlinearityToUse = inputParamsConfigAll['nonlinearityToUse']
numberOfLayers = inputParamsConfigAll['numberOfLayers']
augmentationFlag = inputParamsConfigAll['augmentationFlag']
weightInitToUse = inputParamsConfigAll['weightInitToUse']
lrDecayFlag = inputParamsConfigAll['lrDecayFlag']
biasInitVal = inputParamsConfigAll['biasInitVal']
if nonlinearityToUse == 'relu':
nonLinearity = lasagne.nonlinearities.rectify
elif nonlinearityToUse == 'tanh':
nonLinearity = lasagne.nonlinearities.tanh
elif nonlinearityToUse == 'sigmoid':
nonLinearity = lasagne.nonlinearities.sigmoid
else:
raise Exception(
'nonlinearityToUse: Unsupported nonlinearity type has been selected for the network, retry with a supported one!')
biasInit = lasagne.init.Constant(biasInitVal) #for relu use biasInit=1 s.t. inputs to relu are positive in beginning
if weightInitToUse == 'normal': #according to documentation, different gains should be used depending on nonlinearity
weight_init = lasagne.init.Normal()
elif weightInitToUse == 'He':
if nonlinearityToUse=='relu':
gainToUse = np.sqrt(2)
else:
gainToUse = 1
weight_init = lasagne.init.HeNormal(gain=gainToUse)
else:
raise Exception(
'weightInitToUse: Unsupported weight initialization type has been selected, retry with a supported one!')
#inputParamsNetwork = dict(shape=input_shape,dropout=float(dropout), nonLinearity=nonLinearity)
imSize = input_shape.rsplit(',')
imSize = [int(i) for i in imSize] #strip the commas and convert to list of int
inputParamsNetwork = dict(n_layer=numberOfLayers, shape=input_shape,dropout=float(dropout), nonLinearity=nonLinearity,
biasInit = biasInit)
####End Input Params####
########################
#load the saved network parameters into network_cnn
########################
network_cnn = Build_3dcnn(weight_init, inputParamsNetwork, input_var)
lasagne.layers.set_all_param_values(network_cnn, param_values)
test_prediction = lasagne.layers.get_output(network_cnn, deterministic=True)
val_fn = theano.function([input_var], [test_prediction]) # ,mode='DebugMode')
########################
########################
########################
#set up the input/labels to be pushed into saved network
#First get the list of test cases
#patient_id=[]
#with h5py.File(os.path.join('/diskStation/temp/28288param/test_500_0.5_28288 .hdf5'), 'r') as hf:
if patientMode=='all':
with h5py.File(os.path.join(testFileHdf5), 'r') as hf:
print('List of arrays in this file: \n', hf.keys())
tmp_test_paths = hf.get('Test_set') # Reading list of patients and test file paths
pos_test_paths = np.array(tmp_test_paths) #full paths to all positive test patches
tmp_test_paths = hf.get('neg_test_set')
neg_test_paths = np.array(tmp_test_paths) #full paths to all negative test patches
# patient_in_test = hf.get('Patient_lables')
# for items in patient_in_test:
# patient_id.append(items)
else:
tmp_path = (input_shape.replace(',','')).replace(' ','')
pos_patch_folder = os.path.join(masterPatchFolder, tmp_path, ('pos_' + tmp_path))
neg_patch_folder = os.path.join(masterPatchFolder, tmp_path, ('neg_smp_0_' + tmp_path))
pos_test_paths = np.array(glob.glob(os.path.join(pos_patch_folder, (patientMode + '*'))))#find all patient* files
neg_test_paths = np.array(glob.glob(os.path.join(neg_patch_folder, (patientMode + '*'))))
if runFlag=='pos':
num_test = pos_test_paths.shape[0]
test_data = np.zeros((num_test,1,imSize[0], imSize[1], imSize[2]),dtype='float32') #array in proper format for input to network
test_labels = np.ones((num_test,),dtype = 'float32') #labels for the data array
test_paths = pos_test_paths
elif runFlag=='neg':
num_test = neg_test_paths.shape[0]
test_data = np.zeros((num_test,1,imSize[0], imSize[1], imSize[2]),dtype='float32') #array in proper format for input to network
test_labels = np.zeros((num_test,),dtype = 'float32') #labels for the data array
test_paths = neg_test_paths
with open(minmaxFilePath) as json_data:
minmaxAll = json.load(json_data)
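    # Based on the parsing below, max_min.json is assumed to map case ids such as
    # 'p0045_20000101_s3000547' to 'max/min' strings (any example values are
    # illustrative only, not taken from the actual file).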
for i in range(0,num_test):
currentPath = test_paths[i]
currentCase = os.path.basename(currentPath)
currentCase = currentCase.split('_')[0:3]
currentCase = "_".join(currentCase)
currentMinMax = minmaxAll[currentCase].split('/')
currentMax = np.float32(currentMinMax[0])
currentMin = np.float32(currentMinMax[1])
if sardarImplementationFlag==0:
currentPatch = (np.load(currentPath)).astype('int32') #converting uint16 to int32/float32 directly is problematic
currentPatch = currentPatch.astype('float32')
currentPatch = (currentPatch - currentMin)/(currentMax - currentMin)
else:
currentPatch = (np.load(currentPath)).astype('int16') #should convert uint16 to int16 then float32 to avoid typecasting problem
currentPatch = currentPatch.astype('float32')
currentPatch = (currentPatch - currentMax)/(currentMax - currentMin)
test_data[i,0,:,:,:] = currentPatch
test_pred = val_fn(test_data)
test_pred = test_pred[0]
test_acc = np.mean(np.equal(np.argmax(test_pred, axis=1), test_labels),
dtype=float)
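    # Added summary printout (not in the original script): report the overall
    # accuracy of the loaded model on the selected patches.
    print('Accuracy on %d %s patches (patientMode=%s): %.4f' % (num_test, runFlag, patientMode, test_acc))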
| bsd-2-clause |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
        Whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation be done on a copy. Let the default value
to True unless you don't care about side effects
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximizes
max corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
| gpl-2.0 |
rahul-c1/scikit-learn | sklearn/covariance/graph_lasso_.py | 17 | 23130 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
the objective function is made of a shifted scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
This results from the bound for the all the Lasso that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
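# Illustrative sketch (not part of the original module): a typical use of
# alpha_max is to anchor a log-spaced penalty grid, e.g.
#     emp_cov = empirical_covariance(X)
#     alphas = np.logspace(np.log10(1e-2 * alpha_max(emp_cov)),
#                          np.log10(alpha_max(emp_cov)), 10)[::-1]
# which is essentially how GraphLassoCV below builds its first grid of alphas.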
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
max_iter=100, verbose=False, return_costs=False,
eps=np.finfo(np.float).eps, return_n_iter=False):
"""l1-penalized covariance estimator
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov like), we scale down the
# off-diagonal coefficients of our starting point: This is needed, as
# in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Besides, this takes a
    # conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solver have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars')
coefs = coefs[:, -1]
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
            warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
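# Illustrative sketch (not part of the original module): typical direct usage is
#     emp_cov = empirical_covariance(X)
#     cov_, prec_ = graph_lasso(emp_cov, alpha=0.01)
# where prec_ is the sparse precision (inverse covariance) estimate.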
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Parameters
----------
alpha : positive float, optional
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
        printed at each iteration.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
return_n_iter=True)
return self
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
    alphas : integer, or list of positive floats, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
    tol : positive float, optional
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped.
    max_iter : integer, optional
        Maximum number of iterations.
    mode : {'cd', 'lars'}
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where number of features is greater
        than number of samples. Elsewhere prefer cd which is more numerically
        stable.
    n_jobs : int, optional
        number of jobs to run in parallel (default 1).
    verbose : boolean, optional
        If verbose is True, the objective function and duality gap are
        printed at each iteration.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
    grid_scores : 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False,
assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not allow to gain anything (same execution time with
# or without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
            # Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose,
return_n_iter=True)
return self
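# Illustrative usage sketch (added commentary, not part of the original
# module).  It assumes the estimator defined above is ``GraphLassoCV`` (as
# its verbose messages suggest) and uses made-up toy data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(60, 5)                      # 60 samples, 5 features
    model = GraphLassoCV(alphas=4, n_refinements=4, cv=3).fit(X_demo)
    print('selected alpha : %r' % model.alpha_)    # best penalty found
    print('alphas explored: %d' % len(model.cv_alphas_))
    print('score grid     : %r' % (model.grid_scores.shape,))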
| bsd-3-clause |
ky822/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    """Determine number of trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
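# Worked example (added comment): with a 70% inlier ratio, min_samples=2 and
# probability=0.99 the formula above gives
#     ceil(log(0.01) / log(1 - 0.7 ** 2)) = ceil(6.84) = 7 trials,
# while min_samples=4 already needs ceil(16.8) = 17 trials; that is,
# _dynamic_max_trials(70, 100, 2, 0.99) returns 7.0.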
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly chosen sub-samples. Consider relaxing"
                " the constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
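# Illustrative usage sketch (added, not part of the original module).  The
# data below is made up purely to exercise the estimator.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 1)
    y_demo = 3. * X_demo.ravel() + 0.1 * rng.randn(200)
    y_demo[:20] += 10.                       # inject gross outliers
    ransac = RANSACRegressor(random_state=0).fit(X_demo, y_demo)
    print('estimated slope: %s' % ransac.estimator_.coef_)
    print('%d inliers out of %d' % (ransac.inlier_mask_.sum(), len(y_demo)))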
| bsd-3-clause |
lewislone/mStocks | gadget/sdhub/tushare/util/dateu.py | 3 | 2480 | # -*- coding:utf-8 -*-
import datetime
import time
import pandas as pd
def year_qua(date):
mon = date[5:7]
mon = int(mon)
return[date[0:4], _quar(mon)]
def _quar(mon):
if mon in [1, 2, 3]:
return '1'
elif mon in [4, 5, 6]:
return '2'
elif mon in [7, 8, 9]:
return '3'
elif mon in [10, 11, 12]:
return '4'
else:
return None
def today():
day = datetime.datetime.today().date()
return str(day)
def get_year():
year = datetime.datetime.today().year
return year
def get_month():
month = datetime.datetime.today().month
return month
def get_hour():
return datetime.datetime.today().hour
def today_last_year():
lasty = datetime.datetime.today().date() + datetime.timedelta(-365)
return str(lasty)
def day_last_week(days=-7):
lasty = datetime.datetime.today().date() + datetime.timedelta(days)
return str(lasty)
def get_now():
return time.strftime('%Y-%m-%d %H:%M:%S')
def diff_day(start=None, end=None):
d1 = datetime.datetime.strptime(end, '%Y-%m-%d')
d2 = datetime.datetime.strptime(start, '%Y-%m-%d')
delta = d1 - d2
return delta.days
def get_quarts(start, end):
idx = pd.period_range('Q'.join(year_qua(start)), 'Q'.join(year_qua(end)),
freq='Q-JAN')
return [str(d).split('Q') for d in idx][::-1]
holiday = ['2015-01-01', '2015-01-02', '2015-02-18', '2015-02-19', '2015-02-20', '2015-02-23', '2015-02-24',
'2015-04-06', '2015-05-01', '2015-06-22', '2015-09-03', '2015-09-04', '2015-10-01', '2015-10-02',
'2015-10-05', '2015-10-06', '2015-10-07',
'2016-01-01', '2016-02-08', '2016-02-09', '2016-02-10', '2016-02-11', '2016-02-12', '2016-04-04',
'2016-05-02', '2016-06-09', '2016-06-10', '2016-09-15', '2016-09-16', '2016-10-03', '2016-10-04',
'2016-10-05', '2016-10-06', '2016-10-07']
def is_holiday(date):
    if isinstance(date, str):
        today = datetime.datetime.strptime(date, '%Y-%m-%d')
    else:
        # also accept datetime/date objects, not just 'YYYY-MM-DD' strings
        today = date
if today.isoweekday() in [6, 7] or date in holiday:
return True
else:
return False
def last_tddate():
today = datetime.datetime.today().date()
    today = int(today.strftime("%w"))
if today == 0:
return day_last_week(-2)
else:
return day_last_week(-1)
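if __name__ == '__main__':
    # Illustrative only (added): exercise a few of the helpers defined above.
    print(today())                   # e.g. '2016-10-08'
    print(year_qua(today()))         # e.g. ['2016', '4']
    print(is_holiday('2016-10-03'))  # True: listed in `holiday`
    print(last_tddate())             # most recent trading day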
| mit |
Johnetordoff/modular-file-renderer | tests/extensions/tabular/test_panda_tools.py | 2 | 1499 | import os
from mfr.extensions.tabular.libs import panda_tools
BASE = os.path.dirname(os.path.abspath(__file__))
class TestTabularPandaTools:
def test_data_from_dateframe(self):
with open(os.path.join(BASE, 'files', 'test.csv')) as fp:
sheets = panda_tools.csv_pandas(fp)
sheet = sheets.popitem()[1]
assert type(sheet[0]) == list
assert type(sheet[0][1]) == dict
def test_csv_pandas(self):
with open(os.path.join(BASE, 'files', 'test.csv')) as fp:
sheets = panda_tools.csv_pandas(fp)
sheet = sheets.popitem()[1]
assert sheet[0][0] == {'field': 'one', 'id': 'one', 'name': 'one', 'sortable': True}
assert sheet[1][0] == {'one': 'à', 'two': 'b', 'three': 'c'}
def test_tsv_pandas(self):
with open(os.path.join(BASE, 'files', 'test.tsv')) as fp:
sheets = panda_tools.csv_pandas(fp)
sheet = sheets.popitem()[1]
assert sheet[0][0] == {'field': 'one\ttwo\tthree', 'id': 'one\ttwo\tthree', 'name': 'one\ttwo\tthree', 'sortable': True}
assert sheet[1][0] == {'one\ttwo\tthree': 'a\tb\tc'}
# def test_dta_pandas():
# with open('mfr_tabular/tests/fixtures/test.dta') as fp:
# headers, data = panda_tools.dta_pandas(fp)
# assert headers[0] == {'field': 'one', 'id': 'one', 'name': 'one'}
# assert data[0] == {'one': 'a', 'two': 'b', 'three': 'c'}
# assert len(data) is 2
# assert len(headers) is 3
| apache-2.0 |
altair-viz/altair | altair/utils/tests/test_data.py | 1 | 3861 | import os
import pytest
import pandas as pd
from toolz import pipe
from ..data import limit_rows, MaxRowsError, sample, to_values, to_json, to_csv
def _create_dataframe(N):
data = pd.DataFrame({"x": range(N), "y": range(N)})
return data
def _create_data_with_values(N):
data = {"values": [{"x": i, "y": i + 1} for i in range(N)]}
return data
def test_limit_rows():
"""Test the limit_rows data transformer."""
data = _create_dataframe(10)
result = limit_rows(data, max_rows=20)
assert data is result
with pytest.raises(MaxRowsError):
pipe(data, limit_rows(max_rows=5))
data = _create_data_with_values(10)
result = pipe(data, limit_rows(max_rows=20))
assert data is result
with pytest.raises(MaxRowsError):
limit_rows(data, max_rows=5)
def test_sample():
"""Test the sample data transformer."""
data = _create_dataframe(20)
result = pipe(data, sample(n=10))
assert len(result) == 10
assert isinstance(result, pd.DataFrame)
data = _create_data_with_values(20)
result = sample(data, n=10)
assert isinstance(result, dict)
assert "values" in result
assert len(result["values"]) == 10
data = _create_dataframe(20)
result = pipe(data, sample(frac=0.5))
assert len(result) == 10
assert isinstance(result, pd.DataFrame)
data = _create_data_with_values(20)
result = sample(data, frac=0.5)
assert isinstance(result, dict)
assert "values" in result
assert len(result["values"]) == 10
def test_to_values():
"""Test the to_values data transformer."""
data = _create_dataframe(10)
result = pipe(data, to_values)
assert result == {"values": data.to_dict(orient="records")}
def test_type_error():
"""Ensure that TypeError is raised for types other than dict/DataFrame."""
for f in (sample, limit_rows, to_values):
with pytest.raises(TypeError):
pipe(0, f)
def test_dataframe_to_json():
"""Test to_json
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_dataframe(10)
try:
result1 = pipe(data, to_json)
result2 = pipe(data, to_json)
filename = result1["url"]
output = pd.read_json(filename)
finally:
os.remove(filename)
assert result1 == result2
assert output.equals(data)
def test_dict_to_json():
"""Test to_json
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_data_with_values(10)
try:
result1 = pipe(data, to_json)
result2 = pipe(data, to_json)
filename = result1["url"]
output = pd.read_json(filename).to_dict(orient="records")
finally:
os.remove(filename)
assert result1 == result2
assert data == {"values": output}
def test_dataframe_to_csv():
"""Test to_csv with dataframe input
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_dataframe(10)
try:
result1 = pipe(data, to_csv)
result2 = pipe(data, to_csv)
filename = result1["url"]
output = pd.read_csv(filename)
finally:
os.remove(filename)
assert result1 == result2
assert output.equals(data)
def test_dict_to_csv():
"""Test to_csv with dict input
- make certain the filename is deterministic
- make certain the file contents match the data
"""
data = _create_data_with_values(10)
try:
result1 = pipe(data, to_csv)
result2 = pipe(data, to_csv)
filename = result1["url"]
output = pd.read_csv(filename).to_dict(orient="records")
finally:
os.remove(filename)
assert result1 == result2
assert data == {"values": output}
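# Illustrative (added): the same transformers chain with ``pipe`` outside of
# a test, using the helpers defined above.
if __name__ == '__main__':
    df = _create_dataframe(100)
    print(pipe(df, sample(n=3), to_values))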
| bsd-3-clause |
joosthoeks/jhTAlib | example/example-4-plot-quandl.py | 1 | 1459 | #!/usr/bin/env python
import quandl
import jhtalib as jhta
import matplotlib.pyplot as plt
def main():
# quandl_data = quandl.get('BCHARTS/BITSTAMPUSD', start_date='2011-01-01', end_date='2018-11-01', order='asc', collapse='daily', returns='numpy', authtoken='YOUR_AUTH_TOKEN')
quandl_data = quandl.get('BCHARTS/BITSTAMPUSD', start_date='2011-01-01', end_date='2018-11-01', order='asc', collapse='daily', returns='numpy')
df = {'datetime': [], 'Open': [], 'High': [], 'Low': [], 'Close': [], 'Volume': []}
i = 0
while i < len(quandl_data['Close']):
df['datetime'].append(str(quandl_data['Date'][i]))
df['Open'].append(float(quandl_data['Open'][i]))
df['High'].append(float(quandl_data['High'][i]))
df['Low'].append(float(quandl_data['Low'][i]))
df['Close'].append(float(quandl_data['Close'][i]))
df['Volume'].append(int(quandl_data['Volume (BTC)'][i]))
i += 1
x = df['datetime']
plt.figure(1)
plt.subplot(211)
plt.title('Time / Price')
plt.xlabel('Time')
plt.ylabel('Price')
plt.grid(True)
plt.plot(x, df['Close'], color='blue')
plt.plot(x, df['High'], color='grey')
plt.plot(x, df['Low'], color='grey')
plt.plot(x, jhta.ATH(df)['ath'], color='red')
plt.plot(x, jhta.LMC(df)['lmc'], color='green')
plt.legend(['Close', 'High', 'Low', 'ATH', 'LMC'], loc='upper left')
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
jseabold/scipy | scipy/misc/common.py | 8 | 13260 | """
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy
import numpy as np
from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array,
zeros, eye, poly1d, r_, sum, fromstring, isfinite,
squeeze, amax, reshape, sign, broadcast_arrays)
from scipy._lib._version import NumpyVersion
__all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena',
'ascent', 'face']
_NUMPY_170 = (NumpyVersion(numpy.__version__) >= NumpyVersion('1.7.0'))
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed. Tuple of ints is not accepted if NumPy
version is lower than 1.7.0.
.. versionadded:: 0.11.0
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array.
.. versionadded:: 0.15.0
b : array-like, optional
        Scaling factor for exp(`a`), which must be of the same shape as `a`
        or broadcastable to `a`. These values may be negative in order to
implement subtraction.
.. versionadded:: 0.12.0
return_sign : bool, optional
If this is set to True, the result will be a pair containing sign
information; if False, results that are negative will be returned
as NaN. Default is False (no sign information).
.. versionadded:: 0.16.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
sgn : ndarray
If return_sign is True, this will be an array of floating-point
numbers matching res and +1, 0, or -1 depending on the sign
of the result. If False, only one result is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
Returning a sign flag
>>> logsumexp([1,2],b=[1,-1],return_sign=True)
(1.5413248546129181, -1.0)
"""
a = asarray(a)
if b is not None:
a, b = broadcast_arrays(a,b)
if np.any(b == 0):
a = a + 0. # promote to at least float
a[b == 0] = -np.inf
# keepdims is available in numpy.sum and numpy.amax since NumPy 1.7.0
#
# Because SciPy supports versions earlier than 1.7.0, we have to handle
# those old versions differently
if not _NUMPY_170:
# When support for Numpy < 1.7.0 is dropped, this implementation can be
# removed. This implementation is a bit hacky. Similarly to old NumPy's
# sum and amax functions, 'axis' must be an integer or None, tuples and
# lists are not supported. Although 'keepdims' is not supported by these
# old NumPy's functions, this function supports it.
# Solve the shape of the reduced array
if axis is None:
sh_keepdims = (1,) * a.ndim
else:
sh_keepdims = list(a.shape)
sh_keepdims[axis] = 1
a_max = amax(a, axis=axis)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
tmp = b * exp(a - reshape(a_max, sh_keepdims))
else:
tmp = exp(a - reshape(a_max, sh_keepdims))
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
s = sum(tmp, axis=axis)
if return_sign:
sgn = sign(s)
s *= sgn # /= makes more sense but we need zero -> zero
out = log(s)
out += a_max
if keepdims:
# Put back the reduced axes with size one
out = reshape(out, sh_keepdims)
if return_sign:
sgn = reshape(sgn, sh_keepdims)
else:
# This is a more elegant implementation, requiring NumPy >= 1.7.0
a_max = amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
tmp = b * exp(a - a_max)
else:
tmp = exp(a - a_max)
# suppress warnings about log of zero
with np.errstate(divide='ignore'):
s = sum(tmp, axis=axis, keepdims=keepdims)
if return_sign:
sgn = sign(s)
s *= sgn # /= makes more sense but we need zero -> zero
out = log(s)
if not keepdims:
a_max = squeeze(a_max, axis=axis)
out += a_max
if return_sign:
return out, sgn
else:
return out
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
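# Added illustration: the classic low-order stencils fall out of the code
# above, e.g.
#     central_diff_weights(3) -> [-0.5,  0. ,  0.5]
#     central_diff_weights(5) -> [ 1, -8,  0,  8, -1] / 12
# which match the hard-coded weights reused by `derivative` below.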
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
Decreasing the step size too small can result in round-off error.
Examples
--------
>>> def f(x):
... return x**3 + x**2
...
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Notes
-----
Though safe for work in most places, this sexualized image is drawn from
Playboy and makes some viewers uncomfortable. It has been very widely
used as an example in image processing and is therefore made available
for compatibility. For new code that needs an example image we recommend
`face` or `ascent`.
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import pickle
import os
    fname = os.path.join(os.path.dirname(__file__), 'lena.dat')
    with open(fname, 'rb') as f:
        lena = array(pickle.load(f))
    return lena
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
raccoon-procyon-lotor.jpg at http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
        If True then return an 8-bit gray-scale image, otherwise return a
        color image.
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
230
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
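# Illustrative demo (added, not part of the original module): `logsumexp`
# avoids the overflow that a naive computation hits for large inputs.
if __name__ == '__main__':
    _big = np.array([1000., 1000.])
    print(np.log(np.sum(np.exp(_big))))    # overflows to inf (with a warning)
    print(logsumexp(_big))                 # ~ 1000.6931 = 1000 + log(2)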
| bsd-3-clause |
zorroblue/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 20 | 4271 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_estimate_bandwidth_1sample():
# Test estimate_bandwidth when n_samples=1 and quantile<1, so that
# n_neighbors is set to 1.
bandwidth = estimate_bandwidth(X, n_samples=1, quantile=0.3)
assert_equal(bandwidth, 0.)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_estimate_bandwidth_with_sparse_matrix():
# Test estimate_bandwidth with sparse matrix
X = sparse.lil_matrix((1000, 1000))
msg = "A sparse matrix was passed, but dense data is required."
assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_almost_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_almost_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
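# Illustrative (added): a quick end-to-end run of MeanShift on the synthetic
# blobs defined at module level above.
if __name__ == '__main__':
    ms = MeanShift(bandwidth=1.2).fit(X)
    print('estimated clusters: %d' % len(np.unique(ms.labels_)))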
| bsd-3-clause |
leonidk/centest | learning/nin-smaller-basic.py | 1 | 2959 | import numpy as np
from sklearn.metrics import accuracy_score
# load data
gt = np.loadtxt('gt.csv',delimiter=',')
sgbm = np.loadtxt('sgbm.csv',delimiter=',')
raw = np.loadtxt('raw.csv',delimiter=',')
# data shape
disp_dim = raw.shape[1]
N = gt.shape[0]
# split things
def split_data(data,pt):
return data[:pt], data[pt:]
rand_perm = np.random.permutation(N)
RAND_FRAC = int(round(0.66 * N))
gt = gt[rand_perm]
raw_orig = np.copy(raw)
raw = raw[rand_perm,:]
sgbm = sgbm[rand_perm,:]
train_gt, test_gt = split_data(gt,RAND_FRAC)
train_raw, test_raw = split_data(raw,RAND_FRAC)
train_sgbm, test_sgbm = split_data(sgbm,RAND_FRAC)
naive_raw = np.argmin(test_raw,1)
naive_sgbm = np.argmin(test_sgbm,1)
print 'raw accuracy ', accuracy_score(test_gt,naive_raw)
print 'sgbm accuracy ', accuracy_score(test_gt,naive_sgbm)
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.ensemble import RandomForestClassifier
#clf = SGDClassifier(n_jobs=-1)
#clf.fit(train_raw,train_gt)
#pred = clf.predict(test_raw)
#print 'linearsvm accuracy ', accuracy_score(test_gt,pred)
#clf = LogisticRegression(n_jobs=-1)
#clf.fit(train_raw,train_gt)
#pred = clf.predict(test_raw)
#print 'logistic accuracy ', accuracy_score(test_gt,pred)
#clf = RandomForestClassifier(min_samples_leaf=20,n_jobs=-1)
#clf.fit(train_raw,train_gt)
#pred = clf.predict(test_raw)
#print 'rfc accuracy ', accuracy_score(test_gt,pred)
#pred = clf.predict(raw_orig)
#with open('rfc.txt','w') as otf:
# for p in pred:
# otf.write(str(int(p)) + '\n')
from keras.utils.np_utils import to_categorical
one_hot_train = to_categorical(train_gt,disp_dim)
one_hot_test = to_categorical(test_gt,disp_dim)
from keras.models import Sequential
from keras.optimizers import SGD,Adam
from keras.regularizers import *
from keras.layers import Dense,Activation,Convolution1D,Flatten,Dropout,AveragePooling1D
model = Sequential()
model.add(Convolution1D(8,9,border_mode='same',input_dim=1,input_length=70))
model.add(Activation('relu'))
model.add(Convolution1D(16,9,border_mode='same',subsample_length=2))
model.add(Activation('relu'))
model.add(Convolution1D(32,9,border_mode='same',subsample_length=5))
model.add(Activation('relu'))
model.add(Convolution1D(64,5,border_mode='same'))
model.add(Activation('relu'))
model.add(AveragePooling1D(7))
model.add(Flatten())
model.add(Dense(disp_dim))
model.add(Activation('softmax'))
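# Architecture note (added comment): the stack above is a small NIN-style
# 1-D conv net over the 70-bin matching-cost curve. The 'same' convolutions
# with subsample lengths 2 and 5 shrink the sequence 70 -> 35 -> 7, a final
# 5-tap conv mixes channels, average pooling over the remaining 7 steps
# collapses the sequence, and the dense softmax scores all 70 disparities.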
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001),metrics=['accuracy'])
X = -train_raw + train_raw.mean()
X = X.reshape((-1,70,1))
model.fit(X,one_hot_train,nb_epoch=24,batch_size=128,verbose=2)
X = -test_raw + test_raw.mean()
X = X.reshape((-1,70,1))
pred = model.predict_classes(X)
print '2lyer nn accuracy ', accuracy_score(test_gt,pred)
X = -raw_orig + train_raw.mean()
X = X.reshape((-1,70,1))
pred = model.predict_classes(X)
with open('1dcnn-nin.txt','w') as otf:
for p in pred:
otf.write(str(int(p)) + '\n')
| mpl-2.0 |
playingaround2017/test123 | gamera/gui/matplotlib_support.py | 2 | 10853 | #
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom,
# and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from sys import stderr
try:
import matplotlib
# if (not hasattr(matplotlib, '__version__') or
# matplotlib.__version__ not in ("0.73.1", "0.84", "0.90.1")):
# print >>stderr, "WARNING: The version of matplotlib you have installed has not been officially"
# print >>stderr, "tested with Gamera. It may work fine, or you may experience strange"
# print >>stderr, "problems using the matplotlib functionality. Please include the"
# print >>stderr, "version of your matplotlib (%s) in any bug reports to the Gamera" % (matplotlib.__version__)
# print >>stderr, "developers.\n"
try:
matplotlib.use("WXAgg")
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends import backend_wxagg as underlying_backend
except ImportError:
matplotlib.use("WX")
from matplotlib.backends.backend_wx import FigureCanvasWx as FigureCanvas
from matplotlib.backends import backend_wx as underlying_backend
from matplotlib.backends import backend_wx
from matplotlib import backend_bases
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import wx
from gamera.gui import toolbar, gui_util, gamera_icons
except ImportError:
def plot(*args, **kwargs):
raise RuntimeError("Plotting is not supported because the optional matplotlib library\n"
"could not be found.\n\n"
"Download and install matplotlib from matplotlib.sourceforge.net,\n"
"then restart Gamera to have plotting support.")
show_figure = plot
matplotlib_installed = False
else:
cursord = backend_wx.cursord
class GameraPlotToolbar(backend_bases.NavigationToolbar2, toolbar.ToolBar):
def __init__(self, parent, canvas):
self.canvas = canvas
self._idle = True
self.statbar = None
toolbar.ToolBar.__init__(self, parent)
backend_bases.NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
load_bitmap = backend_wx._load_bitmap
self.AddSimpleTool(10, gamera_icons.getIconHomeBitmap(),
'Reset original view', self.home)
self.AddSimpleTool(20, gamera_icons.getIconBackBitmap(),
'Back navigational view', self.back)
self.AddSimpleTool(30, gamera_icons.getIconForwardBitmap(),
'Forward navigational view', self.forward)
self.AddSeparator()
self.pan_button = self.AddSimpleTool(40, gamera_icons.getIconMoveBitmap(),
'Pan/zoom mode', self.pan, True)
self.zoom_button = self.AddSimpleTool(50, gamera_icons.getIconZoomViewBitmap(),
'Zoom to rectangle', self.zoom, True)
self.AddSeparator()
self.AddSimpleTool(60, gamera_icons.getIconSaveBitmap(),
'Save plot contents to file', self.save)
self.AddSimpleTool(70, gamera_icons.getIconPrinterBitmap(),
'Print', self.print_plot)
def save(self, evt):
filename = gui_util.save_file_dialog(self, self.canvas._get_imagesave_wildcards())
if filename is not None:
self.canvas.print_figure(filename)
def print_plot(self, evt):
printout = backend_wx.PrintoutWx(self.canvas)
dialog_data = wx.PrintDialogData()
if wx.VERSION < (2, 5):
dialog_data.EnableHelp(False)
dialog_data.EnablePageNumbers(False)
dialog_data.EnableSelection(False)
printer = wx.Printer(dialog_data)
if not printer.Print(self, printout, True):
if printer.GetLastError() == wx.PRINTER_ERROR:
gui_util.message("A printing error occurred.")
def zoom(self, evt):
if evt.GetIsDown():
self.pan_button.SetValue(False)
else:
self.zoom_button.SetValue(True)
backend_bases.NavigationToolbar2.zoom(self, evt)
def pan(self, evt):
if evt.GetIsDown():
self.zoom_button.SetValue(False)
else:
self.pan_button.SetValue(True)
backend_bases.NavigationToolbar2.pan(self, evt)
# This is all verbatim from backend_wx.py, which for various
      # multiple-inheritance-related reasons can not just be directly
# imported
def set_cursor(self, cursor):
cursor = wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def release(self, event):
try: del self.lastrect
except AttributeError: pass
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
canvas = self.canvas
dc = wx.ClientDC(canvas)
# Set logical function to XOR for rubberbanding
dc.SetLogicalFunction(wx.XOR)
# Set dc brush and pen
# Here I set brush and pen to white and grey respectively
# You can set it to your own choices
# The brush setting is not really needed since we
# dont do any filling of the dc. It is set just for
# the sake of completion.
wbrush = wx.Brush(wx.Colour(255,255,255), wx.TRANSPARENT)
wpen = wx.Pen(wx.Colour(200, 200, 200), 1, wx.SOLID)
dc.SetBrush(wbrush)
dc.SetPen(wpen)
dc.ResetBoundingBox()
dc.BeginDrawing()
height = self.canvas.figure.bbox.height()
y1 = height - y1
y0 = height - y0
         if y1<y0: y0, y1 = y1, y0
         if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = int(x0), int(y0), int(w), int(h)
try: lastrect = self.lastrect
except AttributeError: pass
else: dc.DrawRectangle(*lastrect) #erase last
self.lastrect = rect
dc.DrawRectangle(*rect)
dc.EndDrawing()
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None: self.statbar.set_function(s)
class GameraPlotDropTarget(wx.PyDropTarget):
def __init__(self, figure):
wx.PyDropTarget.__init__(self)
self.df = wx.CustomDataFormat("Vector")
self.data = wx.CustomDataObject(self.df)
self.SetDataObject(self.data)
self.figure = figure
def OnEnter(self, *args):
return wx.DragCopy
def OnDrop(self, *args):
return True
def OnDragOver(self, *args):
return wx.DragCopy
def OnData(self, x, y, d):
if self.GetData():
data = eval(self.data.GetData())
self.figure.axes[0].plot(data)
return d
class GameraPlotFrame(wx.Frame):
def __init__(self, num, figure):
self.num = num
wx.Frame.__init__(self, None, -1, 'matplotlib Plot', size=(550, 350))
self.figure = figure
self.canvas = FigureCanvas(self, -1, self.figure)
self.canvas.SetDropTarget(GameraPlotDropTarget(self.figure))
statbar = backend_wx.StatusBarWx(self)
self.SetStatusBar(statbar)
self.toolbar = GameraPlotToolbar(self, self.canvas)
self.toolbar.set_status_bar(statbar)
box = wx.BoxSizer(wx.VERTICAL)
box.Add(self.toolbar, 0, wx.EXPAND)
box.Add(self.canvas, 1, wx.EXPAND)
self.SetSizer(box)
self.Fit()
self.figmgr = GameraFigureManager(self.canvas, num, self)
def GetToolBar(self):
return self.toolbar
def get_figure_manager(self):
return self.figmgr
_plot_num = 0
def plot(*args):
# Having inf values in the array raises a cryptic error
# message from matplotlib
inf = 1e300
line = args[0]
for x in line:
if x > inf or x < -inf:
raise ValueError("Line contains 'inf' or '-inf' values which can not be plotted.")
figure = Figure()
axis = figure.add_subplot(111)
axis.plot(*args)
show_figure(figure)
return figure
def show_figure(figure):
display = GameraPlotFrame(0, figure)
display.Show()
return display
# Everything below here is just to support pylab mode
def show():
for figwin in Gcf.get_all_fig_managers():
figwin.frame.Show()
figwin.canvas.realize()
figwin.canvas.draw()
def new_figure_manager(num, *args, **kwargs):
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
fig = Figure(*args, **kwargs)
frame = GameraPlotFrame(num, fig)
figmgr = frame.get_figure_manager()
figmgr.canvas.realize()
figmgr.frame.Show()
return figmgr
class GameraFigureManager(backend_bases.FigureManagerBase):
def __init__(self, canvas, num, frame):
backend_bases.FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb != None: self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def destroy(self, *args):
self.frame.Destroy()
self.canvas.Destroy()
import wx
wx.WakeUpIdle()
from matplotlib import backends
backends.show = show
backends.new_figure_manager = new_figure_manager
matplotlib_installed = True
__all__ = "plot show_figure".split()
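# Illustrative usage (added comment): from a running Gamera GUI session one
# would typically do
#     from gamera.gui import matplotlib_support
#     matplotlib_support.plot([1.0, 4.0, 9.0, 16.0])
# which opens a GameraPlotFrame with the toolbar defined above.  Note that
# plot() rejects lines containing +/-inf, so sanitize data first.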
| gpl-2.0 |
roxyboy/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
cmoutard/mne-python | mne/coreg.py | 5 | 38814 | """Coregistration between different coordinate frames"""
# Authors: Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from .externals.six.moves import configparser
import fnmatch
from glob import glob, iglob
import os
import stat
import sys
import re
import shutil
from warnings import warn
from functools import reduce
import numpy as np
from numpy import dot
from .io import read_fiducials, write_fiducials
from .label import read_label, Label
from .source_space import (add_source_space_distances, read_source_spaces,
write_source_spaces)
from .surface import read_surface, write_surface
from .bem import read_bem_surfaces, write_bem_surfaces
from .transforms import rotation, rotation3d, scaling, translation
from .utils import get_config, get_subjects_dir, logger, pformat
from .externals.six.moves import zip
# some path templates
trans_fname = os.path.join('{raw_dir}', '{subject}-trans.fif')
subject_dirname = os.path.join('{subjects_dir}', '{subject}')
bem_dirname = os.path.join(subject_dirname, 'bem')
surf_dirname = os.path.join(subject_dirname, 'surf')
bem_fname = os.path.join(bem_dirname, "{subject}-{name}.fif")
head_bem_fname = pformat(bem_fname, name='head')
fid_fname = pformat(bem_fname, name='fiducials')
fid_fname_general = os.path.join(bem_dirname, "{head}-fiducials.fif")
src_fname = os.path.join(bem_dirname, '{subject}-{spacing}-src.fif')
def _make_writable(fname):
os.chmod(fname, stat.S_IMODE(os.lstat(fname)[stat.ST_MODE]) | 128) # write
def _make_writable_recursive(path):
"""Recursively set writable"""
if sys.platform.startswith('win'):
return # can't safely set perms
for root, dirs, files in os.walk(path, topdown=False):
for f in dirs + files:
_make_writable(os.path.join(root, f))
def create_default_subject(mne_root=None, fs_home=None, update=False,
subjects_dir=None):
"""Create an average brain subject for subjects without structural MRI
Create a copy of fsaverage from the Freesurfer directory in subjects_dir
and add auxiliary files from the mne package.
Parameters
----------
mne_root : None | str
The mne root directory (only needed if MNE_ROOT is not specified as
environment variable).
fs_home : None | str
The freesurfer home directory (only needed if FREESURFER_HOME is not
specified as environment variable).
update : bool
In cases where a copy of the fsaverage brain already exists in the
subjects_dir, this option allows to only copy files that don't already
exist in the fsaverage directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
(os.environ['SUBJECTS_DIR']) as destination for the new subject.
Notes
-----
When no structural MRI is available for a subject, an average brain can be
substituted. Freesurfer comes with such an average brain model, and MNE
comes with some auxiliary files which make coregistration easier.
:py:func:`create_default_subject` copies the relevant files from Freesurfer
into the current subjects_dir, and also adds the auxiliary files provided
by MNE.
The files provided by MNE are listed below and can be found under
``share/mne/mne_analyze/fsaverage`` in the MNE directory (see MNE manual
section 7.19 Working with the average brain):
fsaverage_head.fif:
The approximate head surface triangulation for fsaverage.
fsaverage_inner_skull-bem.fif:
The approximate inner skull surface for fsaverage.
fsaverage-fiducials.fif:
The locations of the fiducial points (LPA, RPA, and nasion).
fsaverage-trans.fif:
Contains a default MEG-MRI coordinate transformation suitable for
fsaverage.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if fs_home is None:
fs_home = get_config('FREESURFER_HOME', fs_home)
if fs_home is None:
raise ValueError(
"FREESURFER_HOME environment variable not found. Please "
"specify the fs_home parameter in your call to "
"create_default_subject().")
if mne_root is None:
mne_root = get_config('MNE_ROOT', mne_root)
if mne_root is None:
raise ValueError("MNE_ROOT environment variable not found. Please "
"specify the mne_root parameter in your call to "
"create_default_subject().")
# make sure freesurfer files exist
fs_src = os.path.join(fs_home, 'subjects', 'fsaverage')
if not os.path.exists(fs_src):
raise IOError('fsaverage not found at %r. Is fs_home specified '
'correctly?' % fs_src)
for name in ('label', 'mri', 'surf'):
dirname = os.path.join(fs_src, name)
if not os.path.isdir(dirname):
raise IOError("Freesurfer fsaverage seems to be incomplete: No "
"directory named %s found in %s" % (name, fs_src))
# make sure destination does not already exist
dest = os.path.join(subjects_dir, 'fsaverage')
if dest == fs_src:
raise IOError(
"Your subjects_dir points to the freesurfer subjects_dir (%r). "
"The default subject can not be created in the freesurfer "
"installation directory; please specify a different "
"subjects_dir." % subjects_dir)
elif (not update) and os.path.exists(dest):
raise IOError(
"Can not create fsaverage because %r already exists in "
"subjects_dir %r. Delete or rename the existing fsaverage "
"subject folder." % ('fsaverage', subjects_dir))
# make sure mne files exist
mne_fname = os.path.join(mne_root, 'share', 'mne', 'mne_analyze',
'fsaverage', 'fsaverage-%s.fif')
mne_files = ('fiducials', 'head', 'inner_skull-bem', 'trans')
for name in mne_files:
fname = mne_fname % name
if not os.path.isfile(fname):
raise IOError("MNE fsaverage incomplete: %s file not found at "
"%s" % (name, fname))
# copy fsaverage from freesurfer
logger.info("Copying fsaverage subject from freesurfer directory...")
if (not update) or not os.path.exists(dest):
shutil.copytree(fs_src, dest)
_make_writable_recursive(dest)
# add files from mne
dest_bem = os.path.join(dest, 'bem')
if not os.path.exists(dest_bem):
os.mkdir(dest_bem)
logger.info("Copying auxiliary fsaverage files from mne directory...")
dest_fname = os.path.join(dest_bem, 'fsaverage-%s.fif')
_make_writable_recursive(dest_bem)
for name in mne_files:
if not os.path.exists(dest_fname % name):
shutil.copy(mne_fname % name, dest_bem)
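# The helper below is a hedged usage sketch for ``create_default_subject``;
# the directory paths are placeholders, not paths referenced anywhere in mne,
# and in practice MNE_ROOT / FREESURFER_HOME are usually read from the
# environment rather than passed explicitly.
def _example_create_default_subject():
    """Illustrative only: copy fsaverage into a custom subjects directory."""
    create_default_subject(mne_root='/opt/mne', fs_home='/opt/freesurfer',
                           update=True, subjects_dir='/data/subjects')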
def _decimate_points(pts, res=10):
"""Decimate the number of points using a voxel grid
Create a voxel grid with a specified resolution and retain at most one
point per voxel. For each voxel, the point closest to its center is
retained.
Parameters
----------
pts : array, shape (n_points, 3)
The points making up the head shape.
res : scalar
The resolution of the voxel space (side length of each voxel).
Returns
-------
pts : array, shape = (n_points, 3)
The decimated points.
"""
from scipy.spatial.distance import cdist
pts = np.asarray(pts)
# find the bin edges for the voxel space
xmin, ymin, zmin = pts.min(0) - res / 2.
xmax, ymax, zmax = pts.max(0) + res
xax = np.arange(xmin, xmax, res)
yax = np.arange(ymin, ymax, res)
zax = np.arange(zmin, zmax, res)
# find voxels containing one or more point
H, _ = np.histogramdd(pts, bins=(xax, yax, zax), normed=False)
# for each voxel, select one point
X, Y, Z = pts.T
out = np.empty((np.sum(H > 0), 3))
for i, (xbin, ybin, zbin) in enumerate(zip(*np.nonzero(H))):
x = xax[xbin]
y = yax[ybin]
z = zax[zbin]
xi = np.logical_and(X >= x, X < x + res)
yi = np.logical_and(Y >= y, Y < y + res)
zi = np.logical_and(Z >= z, Z < z + res)
idx = np.logical_and(zi, np.logical_and(yi, xi))
ipts = pts[idx]
mid = np.array([x, y, z]) + res / 2.
dist = cdist(ipts, [mid])
i_min = np.argmin(dist)
ipt = ipts[i_min]
out[i] = ipt
return out
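# A minimal sketch of what ``_decimate_points`` does; the random "head shape"
# and the 10 mm resolution are illustrative assumptions only.
def _example_decimate_points():
    """Illustrative only: thin a dense point cloud to one point per voxel."""
    rng = np.random.RandomState(0)
    dense = rng.uniform(-100, 100, (5000, 3))  # dense digitizer points in mm
    sparse = _decimate_points(dense, res=10)
    # Decimation keeps 3D coordinates and can only reduce the point count.
    assert sparse.shape[1] == 3 and len(sparse) <= len(dense)
    return sparse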
def _trans_from_params(param_info, params):
"""Convert transformation parameters into a transformation matrix
Parameters
----------
param_info : tuple, len = 3
Tuple describing the parameters in x (do_translate, do_rotate,
do_scale).
params : tuple
The transformation parameters.
Returns
-------
trans : array, shape = (4, 4)
Transformation matrix.
"""
do_rotate, do_translate, do_scale = param_info
i = 0
trans = []
if do_rotate:
x, y, z = params[:3]
trans.append(rotation(x, y, z))
i += 3
if do_translate:
x, y, z = params[i:i + 3]
trans.insert(0, translation(x, y, z))
i += 3
if do_scale == 1:
s = params[i]
trans.append(scaling(s, s, s))
elif do_scale == 3:
x, y, z = params[i:i + 3]
trans.append(scaling(x, y, z))
trans = reduce(dot, trans)
return trans
def fit_matched_points(src_pts, tgt_pts, rotate=True, translate=True,
scale=False, tol=None, x0=None, out='trans'):
"""Find a transform that minimizes the squared distance between two
matching sets of points.
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (n, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : bool
Number of scaling parameters. With False, points are not scaled. With
True, points are scaled by the same factor along all axes.
tol : scalar | None
The error tolerance. If the distance between any of the matched points
exceeds this value in the solution, a RuntimeError is raised. With
None, no error check is performed.
x0 : None | tuple
Initial values for the fit parameters.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
One of the following, depending on the ``out`` parameter:
trans : array, shape = (4, 4)
Transformation that, if applied to src_pts, minimizes the squared
distance to tgt_pts.
params : array, shape = (n_params, )
        A single array containing the rotation, translation and scaling
        parameters in that order.
"""
from scipy.optimize import leastsq
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
if src_pts.shape != tgt_pts.shape:
raise ValueError("src_pts and tgt_pts must have same shape (got "
"{0}, {1})".format(src_pts.shape, tgt_pts.shape))
rotate = bool(rotate)
translate = bool(translate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0)
elif param_info == (True, False, 1):
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
return (tgt_pts - est).ravel()
if x0 is None:
x0 = (0, 0, 0, 1)
elif param_info == (True, True, 0):
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0)
elif param_info == (True, True, 1):
def error(x):
rx, ry, rz, tx, ty, tz, s = x
trans = reduce(dot, (translation(tx, ty, tz), rotation(rx, ry, rz),
scaling(s, s, s)))
est = dot(src_pts, trans.T)
return (tgt_pts - est[:, :3]).ravel()
if x0 is None:
x0 = (0, 0, 0, 0, 0, 0, 1)
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
x, _, _, _, _ = leastsq(error, x0, full_output=True)
# re-create the final transformation matrix
if (tol is not None) or (out == 'trans'):
trans = _trans_from_params(param_info, x)
# assess the error of the solution
if tol is not None:
if not translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
est_pts = dot(src_pts, trans.T)[:, :3]
err = np.sqrt(np.sum((est_pts - tgt_pts) ** 2, axis=1))
if np.any(err > tol):
raise RuntimeError("Error exceeds tolerance. Error = %r" % err)
if out == 'params':
return x
elif out == 'trans':
return trans
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _point_cloud_error(src_pts, tgt_pts):
"""Find the distance from each source point to its closest target point
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_pts : array, shape = (m, 3)
Target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
from scipy.spatial.distance import cdist
Y = cdist(src_pts, tgt_pts, 'euclidean')
dist = Y.min(axis=1)
return dist
def _point_cloud_error_balltree(src_pts, tgt_tree):
"""Find the distance from each source point to its closest target point
Uses sklearn.neighbors.BallTree for greater efficiency
Parameters
----------
src_pts : array, shape = (n, 3)
Source points.
tgt_tree : sklearn.neighbors.BallTree
BallTree of the target points.
Returns
-------
dist : array, shape = (n, )
For each point in ``src_pts``, the distance to the closest point in
``tgt_pts``.
"""
dist, _ = tgt_tree.query(src_pts)
return dist.ravel()
def fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=True,
scale=0, x0=None, leastsq_args={}, out='params'):
"""Find a transform that minimizes the squared distance from each source
point to its closest target point
Uses :func:`scipy.optimize.leastsq` to find a transformation involving
a combination of rotation, translation, and scaling (in that order).
Parameters
----------
src_pts : array, shape = (n, 3)
Points to which the transform should be applied.
tgt_pts : array, shape = (m, 3)
Points to which src_pts should be fitted. Each point in tgt_pts should
correspond to the point in src_pts with the same index.
rotate : bool
Allow rotation of the ``src_pts``.
translate : bool
Allow translation of the ``src_pts``.
scale : 0 | 1 | 3
Number of scaling parameters. With 0, points are not scaled. With 1,
points are scaled by the same factor along all axes. With 3, points are
scaled by a separate factor along each axis.
x0 : None | tuple
Initial values for the fit parameters.
leastsq_args : dict
Additional parameters to submit to :func:`scipy.optimize.leastsq`.
out : 'params' | 'trans'
In what format to return the estimate: 'params' returns a tuple with
the fit parameters; 'trans' returns a transformation matrix of shape
(4, 4).
Returns
-------
x : array, shape = (n_params, )
Estimated parameters for the transformation.
Notes
-----
Assumes that the target points form a dense enough point cloud so that
the distance of each src_pt to the closest tgt_pt can be used as an
estimate of the distance of src_pt to tgt_pts.
"""
from scipy.optimize import leastsq
kwargs = {'epsfcn': 0.01}
kwargs.update(leastsq_args)
# assert correct argument types
src_pts = np.atleast_2d(src_pts)
tgt_pts = np.atleast_2d(tgt_pts)
translate = bool(translate)
rotate = bool(rotate)
scale = int(scale)
if translate:
src_pts = np.hstack((src_pts, np.ones((len(src_pts), 1))))
try:
from sklearn.neighbors import BallTree
tgt_pts = BallTree(tgt_pts)
errfunc = _point_cloud_error_balltree
except ImportError:
warn("Sklearn could not be imported. Fitting points will be slower. "
"To improve performance, install the sklearn module.")
errfunc = _point_cloud_error
# for efficiency, define parameter specific error function
param_info = (rotate, translate, scale)
if param_info == (True, False, 0):
x0 = x0 or (0, 0, 0)
def error(x):
rx, ry, rz = x
trans = rotation3d(rx, ry, rz)
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 1):
x0 = x0 or (0, 0, 0, 1)
def error(x):
rx, ry, rz, s = x
trans = rotation3d(rx, ry, rz) * s
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, False, 3):
x0 = x0 or (0, 0, 0, 1, 1, 1)
def error(x):
rx, ry, rz, sx, sy, sz = x
trans = rotation3d(rx, ry, rz) * [sx, sy, sz]
est = dot(src_pts, trans.T)
err = errfunc(est, tgt_pts)
return err
elif param_info == (True, True, 0):
x0 = x0 or (0, 0, 0, 0, 0, 0)
def error(x):
rx, ry, rz, tx, ty, tz = x
trans = dot(translation(tx, ty, tz), rotation(rx, ry, rz))
est = dot(src_pts, trans.T)
err = errfunc(est[:, :3], tgt_pts)
return err
else:
raise NotImplementedError(
"The specified parameter combination is not implemented: "
"rotate=%r, translate=%r, scale=%r" % param_info)
est, _, info, msg, _ = leastsq(error, x0, full_output=True, **kwargs)
logger.debug("fit_point_cloud leastsq (%i calls) info: %s", info['nfev'],
msg)
if out == 'params':
return est
elif out == 'trans':
return _trans_from_params(param_info, est)
else:
raise ValueError("Invalid out parameter: %r. Needs to be 'params' or "
"'trans'." % out)
def _find_label_paths(subject='fsaverage', pattern=None, subjects_dir=None):
"""Find paths to label files in a subject's label directory
Parameters
----------
subject : str
Name of the mri subject.
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "aparc/*.label" will find all labels
in the "subject/label/aparc" directory). With None, find all labels.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
        (os.environ['SUBJECTS_DIR'])
Returns
    -------
paths : list
List of paths relative to the subject's label directory
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = os.path.join(subjects_dir, subject)
lbl_dir = os.path.join(subject_dir, 'label')
if pattern is None:
paths = []
for dirpath, _, filenames in os.walk(lbl_dir):
rel_dir = os.path.relpath(dirpath, lbl_dir)
for filename in fnmatch.filter(filenames, '*.label'):
path = os.path.join(rel_dir, filename)
paths.append(path)
else:
paths = [os.path.relpath(path, lbl_dir) for path in iglob(pattern)]
return paths
def _find_mri_paths(subject='fsaverage', subjects_dir=None):
"""Find all files of an mri relevant for source transformation
Parameters
----------
subject : str
Name of the mri subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable
        (os.environ['SUBJECTS_DIR'])
Returns
-------
    paths : dict
Dictionary whose keys are relevant file type names (str), and whose
values are lists of paths.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = {}
# directories to create
paths['dirs'] = [bem_dirname, surf_dirname]
# surf/ files
paths['surf'] = surf = []
surf_fname = os.path.join(surf_dirname, '{name}')
surf_names = ('inflated', 'sphere', 'sphere.reg', 'white')
if os.getenv('_MNE_FEW_SURFACES', '') != 'true': # for testing
surf_names = surf_names + (
'orig', 'orig_avg', 'inflated_avg', 'inflated_pre', 'pial',
'pial_avg', 'smoothwm', 'white_avg', 'sphere.reg.avg')
for name in surf_names:
for hemi in ('lh.', 'rh.'):
fname = pformat(surf_fname, name=hemi + name)
surf.append(fname)
# BEM files
paths['bem'] = bem = []
path = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if os.path.exists(path):
bem.append('head')
bem_pattern = pformat(bem_fname, subjects_dir=subjects_dir,
subject=subject, name='*-bem')
re_pattern = pformat(bem_fname, subjects_dir=subjects_dir, subject=subject,
name='(.+)')
for path in iglob(bem_pattern):
match = re.match(re_pattern, path)
name = match.group(1)
bem.append(name)
# fiducials
paths['fid'] = [fid_fname]
# duplicate curvature files
paths['duplicate'] = dup = []
path = os.path.join(surf_dirname, '{name}')
for name in ['lh.curv', 'rh.curv']:
fname = pformat(path, name=name)
dup.append(fname)
# check presence of required files
for ftype in ['surf', 'fid', 'duplicate']:
for fname in paths[ftype]:
path = fname.format(subjects_dir=subjects_dir, subject=subject)
path = os.path.realpath(path)
if not os.path.exists(path):
raise IOError("Required file not found: %r" % path)
# find source space files
paths['src'] = src = []
bem_dir = bem_dirname.format(subjects_dir=subjects_dir, subject=subject)
fnames = fnmatch.filter(os.listdir(bem_dir), '*-src.fif')
prefix = subject + '-'
for fname in fnames:
if fname.startswith(prefix):
fname = "{subject}-%s" % fname[len(prefix):]
path = os.path.join(bem_dirname, fname)
src.append(path)
return paths
def _is_mri_subject(subject, subjects_dir=None):
"""Check whether a directory in subjects_dir is an mri subject directory
Parameters
----------
subject : str
Name of the potential subject/directory.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
is_mri_subject : bool
Whether ``subject`` is an mri subject.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = head_bem_fname.format(subjects_dir=subjects_dir, subject=subject)
if not os.path.exists(fname):
return False
return True
def _mri_subject_has_bem(subject, subjects_dir=None):
"""Check whether an mri subject has a file matching the bem pattern
Parameters
----------
subject : str
Name of the subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
has_bem_file : bool
Whether ``subject`` has a bem file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
pattern = bem_fname.format(subjects_dir=subjects_dir, subject=subject,
name='*-bem')
fnames = glob(pattern)
return bool(len(fnames))
def read_mri_cfg(subject, subjects_dir=None):
"""Read information from the cfg file of a scaled MRI brain
Parameters
----------
subject : str
Name of the scaled MRI subject.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
Returns
-------
cfg : dict
Dictionary with entries from the MRI's cfg file.
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
fname = os.path.join(subjects_dir, subject, 'MRI scaling parameters.cfg')
if not os.path.exists(fname):
raise IOError("%r does not seem to be a scaled mri subject: %r does "
"not exist." % (subject, fname))
logger.info("Reading MRI cfg file %s" % fname)
config = configparser.RawConfigParser()
config.read(fname)
n_params = config.getint("MRI Scaling", 'n_params')
if n_params == 1:
scale = config.getfloat("MRI Scaling", 'scale')
elif n_params == 3:
scale_str = config.get("MRI Scaling", 'scale')
scale = np.array([float(s) for s in scale_str.split()])
else:
raise ValueError("Invalid n_params value in MRI cfg: %i" % n_params)
out = {'subject_from': config.get("MRI Scaling", 'subject_from'),
'n_params': n_params, 'scale': scale}
return out
def _write_mri_config(fname, subject_from, subject_to, scale):
"""Write the cfg file describing a scaled MRI subject
Parameters
----------
fname : str
Target file.
subject_from : str
Name of the source MRI subject.
subject_to : str
Name of the scaled MRI subject.
scale : float | array_like, shape = (3,)
The scaling parameter.
"""
scale = np.asarray(scale)
if np.isscalar(scale) or scale.shape == ():
n_params = 1
else:
n_params = 3
config = configparser.RawConfigParser()
config.add_section("MRI Scaling")
config.set("MRI Scaling", 'subject_from', subject_from)
config.set("MRI Scaling", 'subject_to', subject_to)
config.set("MRI Scaling", 'n_params', str(n_params))
if n_params == 1:
config.set("MRI Scaling", 'scale', str(scale))
else:
config.set("MRI Scaling", 'scale', ' '.join([str(s) for s in scale]))
config.set("MRI Scaling", 'version', '1')
with open(fname, 'w') as fid:
config.write(fid)
def _scale_params(subject_to, subject_from, scale, subjects_dir):
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
if (subject_from is None) != (scale is None):
raise TypeError("Need to provide either both subject_from and scale "
"parameters, or neither.")
if subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
subject_from = cfg['subject_from']
n_params = cfg['n_params']
scale = cfg['scale']
else:
scale = np.asarray(scale)
if scale.ndim == 0:
n_params = 1
elif scale.shape == (3,):
n_params = 3
else:
            raise ValueError("Invalid shape for scale parameter. Need scalar "
"or array of length 3. Got %s." % str(scale))
return subjects_dir, subject_from, n_params, scale
def scale_bem(subject_to, bem_name, subject_from=None, scale=None,
subjects_dir=None):
"""Scale a bem file
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
bem_name : str
Name of the bem file. For example, to scale
``fsaverage-inner_skull-bem.fif``, the bem_name would be
"inner_skull-bem".
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
subjects_dir, subject_from, _, scale = _scale_params(subject_to,
subject_from, scale,
subjects_dir)
src = bem_fname.format(subjects_dir=subjects_dir, subject=subject_from,
name=bem_name)
dst = bem_fname.format(subjects_dir=subjects_dir, subject=subject_to,
name=bem_name)
if os.path.exists(dst):
        raise IOError("File already exists: %s" % dst)
surfs = read_bem_surfaces(src)
if len(surfs) != 1:
raise NotImplementedError("BEM file with more than one surface: %r"
% src)
surf0 = surfs[0]
surf0['rr'] = surf0['rr'] * scale
write_bem_surfaces(dst, surf0)
def scale_labels(subject_to, pattern=None, overwrite=False, subject_from=None,
scale=None, subjects_dir=None):
"""Scale labels to match a brain that was previously created by scaling
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination brain).
pattern : str | None
Pattern for finding the labels relative to the label directory in the
MRI subject directory (e.g., "lh.BA3a.label" will scale
"fsaverage/label/lh.BA3a.label"; "aparc/\*.label" will find all labels
in the "fsaverage/label/aparc" directory). With None, scale all labels.
overwrite : bool
Overwrite any label file that already exists for subject_to (otherwise
        existing labels are skipped).
subject_from : None | str
Name of the original MRI subject (the brain that was scaled to create
subject_to). If None, the value is read from subject_to's cfg file.
scale : None | float | array_like, shape = (3,)
Scaling parameter. If None, the value is read from subject_to's cfg
file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
# read parameters from cfg
if scale is None or subject_from is None:
cfg = read_mri_cfg(subject_to, subjects_dir)
if subject_from is None:
subject_from = cfg['subject_from']
if scale is None:
scale = cfg['scale']
# find labels
paths = _find_label_paths(subject_from, pattern, subjects_dir)
if not paths:
return
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
src_root = os.path.join(subjects_dir, subject_from, 'label')
dst_root = os.path.join(subjects_dir, subject_to, 'label')
# scale labels
for fname in paths:
dst = os.path.join(dst_root, fname)
if not overwrite and os.path.exists(dst):
continue
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
src = os.path.join(src_root, fname)
l_old = read_label(src)
pos = l_old.pos * scale
l_new = Label(l_old.vertices, pos, l_old.values, l_old.hemi,
l_old.comment, subject=subject_to)
l_new.save(dst)
def scale_mri(subject_from, subject_to, scale, overwrite=False,
subjects_dir=None):
"""Create a scaled copy of an MRI subject
Parameters
----------
subject_from : str
Name of the subject providing the MRI.
subject_to : str
New subject name for which to save the scaled MRI.
scale : float | array_like, shape = (3,)
The scaling factor (one or 3 parameters).
overwrite : bool
If an MRI already exists for subject_to, overwrite it.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
See Also
--------
scale_labels : add labels to a scaled MRI
scale_source_space : add a source space to a scaled MRI
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
paths = _find_mri_paths(subject_from, subjects_dir=subjects_dir)
scale = np.asarray(scale)
# make sure we have an empty target directory
dest = subject_dirname.format(subject=subject_to,
subjects_dir=subjects_dir)
if os.path.exists(dest):
if overwrite:
shutil.rmtree(dest)
else:
raise IOError("Subject directory for %s already exists: %r"
% (subject_to, dest))
for dirname in paths['dirs']:
dir_ = dirname.format(subject=subject_to, subjects_dir=subjects_dir)
os.makedirs(dir_)
# save MRI scaling parameters
fname = os.path.join(dest, 'MRI scaling parameters.cfg')
_write_mri_config(fname, subject_from, subject_to, scale)
# surf files [in mm]
for fname in paths['surf']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
pts, tri = read_surface(src)
write_surface(dest, pts * scale, tri)
# BEM files [in m]
for bem_name in paths['bem']:
scale_bem(subject_to, bem_name, subject_from, scale, subjects_dir)
# fiducials [in m]
for fname in paths['fid']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
src = os.path.realpath(src)
pts, cframe = read_fiducials(src)
for pt in pts:
pt['r'] = pt['r'] * scale
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
write_fiducials(dest, pts, cframe)
# duplicate files
for fname in paths['duplicate']:
src = fname.format(subject=subject_from, subjects_dir=subjects_dir)
dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
shutil.copyfile(src, dest)
# source spaces
for fname in paths['src']:
src_name = os.path.basename(fname)
scale_source_space(subject_to, src_name, subject_from, scale,
subjects_dir)
# labels [in m]
scale_labels(subject_to, subject_from=subject_from, scale=scale,
subjects_dir=subjects_dir)
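# A hedged sketch of the typical scaling workflow built on ``scale_mri``; the
# subject names, scaling factor and subjects_dir below are placeholders and
# require an existing FreeSurfer-style subject on disk.
def _example_scale_mri():
    """Illustrative only: create a uniformly scaled copy of fsaverage."""
    scale_mri(subject_from='fsaverage', subject_to='fsaverage_small',
              scale=0.95, overwrite=True, subjects_dir='/data/subjects')
    # scale_mri already calls scale_bem, scale_labels and scale_source_space
    # for the files it finds; scale_labels can be re-run later for additional
    # label files, e.g.:
    scale_labels('fsaverage_small', pattern='aparc/*.label',
                 subjects_dir='/data/subjects')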
def scale_source_space(subject_to, src_name, subject_from=None, scale=None,
subjects_dir=None, n_jobs=1):
"""Scale a source space for an mri created with scale_mri()
Parameters
----------
subject_to : str
Name of the scaled MRI subject (the destination mri subject).
src_name : str
Source space name. Can be a spacing parameter (e.g., ``'7'``,
``'ico4'``, ``'oct6'``) or a file name of a source space file relative
to the bem directory; if the file name contains the subject name, it
should be indicated as "{subject}" in ``src_name`` (e.g.,
``"{subject}-my_source_space-src.fif"``).
subject_from : None | str
The subject from which to read the source space. If None, subject_from
is read from subject_to's config file.
scale : None | float | array, shape = (3,)
Scaling factor. Has to be specified if subjects_from is specified,
otherwise it is read from subject_to's config file.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
n_jobs : int
Number of jobs to run in parallel if recomputing distances (only
applies if scale is an array of length 3, and will not use more cores
than there are source spaces).
"""
subjects_dir, subject_from, n_params, scale = _scale_params(subject_to,
subject_from,
scale,
subjects_dir)
# find the source space file names
if src_name.isdigit():
spacing = src_name # spacing in mm
src_pattern = src_fname
else:
        match = re.match(r"(oct|ico)-?(\d+)$", src_name)
if match:
spacing = '-'.join(match.groups())
src_pattern = src_fname
else:
spacing = None
src_pattern = os.path.join(bem_dirname, src_name)
src = src_pattern.format(subjects_dir=subjects_dir, subject=subject_from,
spacing=spacing)
dst = src_pattern.format(subjects_dir=subjects_dir, subject=subject_to,
spacing=spacing)
# prepare scaling parameters
if n_params == 1:
norm_scale = None
elif n_params == 3:
norm_scale = 1. / scale
else:
raise RuntimeError("Invalid n_params entry in MRI cfg file: %s"
% str(n_params))
# read and scale the source space [in m]
sss = read_source_spaces(src)
logger.info("scaling source space %s: %s -> %s", spacing, subject_from,
subject_to)
logger.info("Scale factor: %s", scale)
add_dist = False
for ss in sss:
ss['subject_his_id'] = subject_to
ss['rr'] *= scale
# distances and patch info
if norm_scale is None:
if ss['dist'] is not None:
ss['dist'] *= scale
ss['nearest_dist'] *= scale
ss['dist_limit'] *= scale
else:
nn = ss['nn']
nn *= norm_scale
norm = np.sqrt(np.sum(nn ** 2, 1))
nn /= norm[:, np.newaxis]
if ss['dist'] is not None:
add_dist = True
if add_dist:
logger.info("Recomputing distances, this might take a while")
dist_limit = np.asscalar(sss[0]['dist_limit'])
add_source_space_distances(sss, dist_limit, n_jobs)
write_source_spaces(dst, sss)
| bsd-3-clause |
jseabold/statsmodels | statsmodels/tsa/forecasting/theta.py | 2 | 23110 | r"""
Implementation of the Theta forecasting method of
Assimakopoulos, V., & Nikolopoulos, K. (2000). The theta model: a decomposition
approach to forecasting. International journal of forecasting, 16(4), 521-530.
and updates in
Hyndman, R. J., & Billah, B. (2003). Unmasking the Theta method. International
Journal of Forecasting, 19(2), 287-290.
Fioruci, J. A., Pellegrini, T. R., Louzada, F., & Petropoulos, F. (2015).
The optimized theta method. arXiv preprint arXiv:1503.03529.
"""
from typing import TYPE_CHECKING, Optional, Tuple
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.iolib.summary import Summary
from statsmodels.iolib.table import SimpleTable
from statsmodels.tools.validation import (
array_like,
bool_like,
float_like,
int_like,
string_like,
)
from statsmodels.tsa.deterministic import DeterministicTerm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.exponential_smoothing import (
ExponentialSmoothing,
)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.tsatools import add_trend, freq_to_period
if TYPE_CHECKING:
import matplotlib.figure
def extend_index(steps: int, index: pd.Index) -> pd.Index:
return DeterministicTerm._extend_index(index, steps)
class ThetaModel:
r"""
The Theta forecasting model of Assimakopoulos and Nikolopoulos (2000)
Parameters
----------
endog : array_like, 1d
The data to forecast.
period : int, default None
The period of the data that is used in the seasonality test and
adjustment. If None then the period is determined from y's index,
if available.
deseasonalize : bool, default True
A flag indicating whether the deseasonalize the data. If True and
use_test is True, the data is only deseasonalized if the null of no
seasonal component is rejected.
use_test : bool, default True
        A flag indicating whether to test the period-th autocorrelation. If this
test rejects using a size of 10%, then decomposition is used. Set to
False to skip the test.
method : {"auto", "additive", "multiplicative"}, default "auto"
The model used for the seasonal decomposition. "auto" uses a
multiplicative if y is non-negative and all estimated seasonal
components are positive. If either of these conditions is False,
then it uses an additive decomposition.
difference : bool, default False
A flag indicating to difference the data before testing for
seasonality.
See Also
--------
statsmodels.tsa.statespace.exponential_smoothing.ExponentialSmoothing
Exponential smoothing parameter estimation and forecasting
statsmodels.tsa.statespace.sarimax.SARIMAX
Seasonal ARIMA parameter estimation and forecasting
Notes
-----
The Theta model forecasts the future as a weighted combination of two
Theta lines. This class supports combinations of models with two
thetas: 0 and a user-specified choice (default 2). The forecasts are
then
.. math::
\hat{X}_{T+h|T} = \frac{\theta-1}{\theta} b_0
\left[h - 1 + \frac{1}{\alpha}
- \frac{(1-\alpha)^T}{\alpha} \right]
+ \tilde{X}_{T+h|T}
where :math:`\tilde{X}_{T+h|T}` is the SES forecast of the endogenous
variable using the parameter :math:`\alpha`. :math:`b_0` is the
slope of a time trend line fitted to X using the terms 0, 1, ..., T-1.
The model is estimated in steps:
1. Test for seasonality
2. Deseasonalize if seasonality detected
3. Estimate :math:`\alpha` by fitting a SES model to the data and
:math:`b_0` by OLS.
4. Forecast the series
5. Reseasonalize if the data was deseasonalized.
    The seasonality test examines whether the autocorrelation at the
seasonal period is different from zero. The seasonality is then
removed using a seasonal decomposition with a multiplicative trend.
If the seasonality estimate is non-positive then an additive trend
is used instead. The default deseasonalizing method can be changed
using the options.
References
----------
.. [1] Assimakopoulos, V., & Nikolopoulos, K. (2000). The theta model: a
decomposition approach to forecasting. International Journal of
Forecasting, 16(4), 521-530.
.. [2] Hyndman, R. J., & Billah, B. (2003). Unmasking the Theta method.
International Journal of Forecasting, 19(2), 287-290.
.. [3] Fioruci, J. A., Pellegrini, T. R., Louzada, F., & Petropoulos, F.
(2015). The optimized theta method. arXiv preprint arXiv:1503.03529.
"""
def __init__(
self,
endog,
*,
period: Optional[int] = None,
deseasonalize: bool = True,
use_test: bool = True,
method: str = "auto",
difference: bool = False
) -> None:
self._y = array_like(endog, "endog", ndim=1)
if isinstance(endog, pd.DataFrame):
self.endog_orig = endog.iloc[:, 0]
else:
self.endog_orig = endog
self._period = int_like(period, "period", optional=True)
self._deseasonalize = bool_like(deseasonalize, "deseasonalize")
self._use_test = (
bool_like(use_test, "use_test") and self._deseasonalize
)
self._diff = bool_like(difference, "difference")
self._method = string_like(
method,
"model",
options=("auto", "additive", "multiplicative", "mul", "add"),
)
if self._period is None and self._deseasonalize:
idx = getattr(endog, "index", None)
pfreq = None
if idx is not None:
pfreq = getattr(idx, "freq", None)
if pfreq is None:
pfreq = getattr(idx, "inferred_freq", None)
if pfreq is not None:
self._period = freq_to_period(pfreq)
else:
raise ValueError(
"You must specify a period or endog must be a "
"pandas object with a DatetimeIndex with "
"a freq not set to None"
)
self._has_seasonality = self._deseasonalize
def _test_seasonality(self) -> None:
y = self._y
if self._diff:
y = np.diff(y)
rho = acf(y, nlags=self._period, fft=True)
nobs = y.shape[0]
stat = nobs * rho[-1] ** 2 / np.sum(rho[:-1] ** 2)
# CV is 10% from a chi2(1), 1.645**2
self._has_seasonality = stat > 2.705543454095404
def _deseasonalize_data(self) -> Tuple[np.ndarray, np.ndarray]:
y = self._y
if not self._has_seasonality:
return self._y, np.empty(0)
self._method = (
"mul" if self._method == "auto" and self._y.min() > 0 else "add"
)
res = seasonal_decompose(y, model=self._method, period=self._period)
if res.seasonal.min() <= 0:
self._method = "add"
res = seasonal_decompose(y, model="add", period=self._period)
return y - res.seasonal, res.seasonal[: self._period]
else:
return y / res.seasonal, res.seasonal[: self._period]
def fit(
self, use_mle: bool = False, disp: bool = False
) -> "ThetaModelResults":
r"""
Estimate model parameters.
Parameters
----------
use_mle : bool, default False
Estimate the parameters using MLE by fitting an ARIMA(0,1,1) with
a drift. If False (the default), estimates parameters using OLS
of a constant and a time-trend and by fitting a SES to the model
data.
        disp : bool, default False
Display iterative output from fitting the model.
Notes
-----
When using MLE, the parameters are estimated from the ARIMA(0,1,1)
.. math::
X_t = X_{t-1} + b_0 + (\alpha-1)\epsilon_{t-1} + \epsilon_t
When estimating the model using 2-step estimation, the model
parameters are estimated using the OLS regression
.. math::
X_t = a_0 + b_0 (t-1) + \eta_t
and the SES
.. math::
\tilde{X}_{t+1} = \alpha X_{t} + (1-\alpha)\tilde{X}_{t}
Returns
-------
ThetaModelResult
Model results and forecasting
"""
if self._deseasonalize and self._use_test:
self._test_seasonality()
y, seasonal = self._deseasonalize_data()
if use_mle:
mod = SARIMAX(y, order=(0, 1, 1), trend="c")
res = mod.fit(disp=disp)
params = np.asarray(res.params)
alpha = params[1] + 1
if alpha > 1:
alpha = 0.9998
res = mod.fit_constrained({"ma.L1": alpha - 1})
params = np.asarray(res.params)
b0 = params[0]
sigma2 = params[-1]
one_step = res.forecast(1) - b0
else:
ct = add_trend(y, "ct", prepend=True)[:, :2]
ct[:, 1] -= 1
_, b0 = np.linalg.lstsq(ct, y, rcond=None)[0]
res = ExponentialSmoothing(
y, initial_level=y[0], initialization_method="known"
).fit(disp=disp)
alpha = res.params[0]
sigma2 = None
one_step = res.forecast(1)
return ThetaModelResults(
b0, alpha, sigma2, one_step, seasonal, use_mle, self
)
@property
def deseasonalize(self) -> bool:
"""Whether to deseasonalize the data"""
return self._deseasonalize
@property
def period(self) -> int:
"""The period of the seasonality"""
return self._period
@property
def use_test(self) -> bool:
"""Whether to test the data for seasonality"""
return self._use_test
@property
def difference(self) -> bool:
"""Whether the data is differenced in the seasonality test"""
return self._diff
@property
def method(self) -> str:
"""The method used to deseasonalize the data"""
return self._method
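# An end-to-end usage sketch of ``ThetaModel`` on simulated monthly data. The
# series, horizon and parameter values below are illustrative assumptions
# only; with ``use_mle=True`` the ARIMA(0, 1, 1)-based estimation described in
# ``fit`` would be used instead.
def _example_theta_model():
    """Illustrative only: fit the model and produce forecasts."""
    rng = np.random.RandomState(0)
    nobs = 120
    index = pd.period_range("2000-01", periods=nobs, freq="M")
    trend = 0.05 * np.arange(nobs)
    seasonal = 0.5 * np.sin(2 * np.pi * np.arange(nobs) / 12)
    y = pd.Series(10 + trend + seasonal + 0.2 * rng.standard_normal(nobs),
                  index=index)
    res = ThetaModel(y, period=12).fit(use_mle=False)
    fcast = res.forecast(steps=12, theta=2)              # point forecasts
    intervals = res.prediction_intervals(steps=12, alpha=0.05)
    return fcast, intervals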
class ThetaModelResults:
"""
Results class from estimated Theta Models.
Parameters
----------
b0 : float
The estimated trend slope.
alpha : float
The estimated SES parameter.
sigma2 : float
The estimated residual variance from the SES/IMA model.
one_step : float
The one-step forecast from the SES.
seasonal : ndarray
An array of estimated seasonal terms.
use_mle : bool
A flag indicating that the parameters were estimated using MLE.
model : ThetaModel
The model used to produce the results.
"""
def __init__(
self,
b0: float,
alpha: float,
sigma2: Optional[float],
one_step: float,
seasonal: np.ndarray,
use_mle: bool,
model: ThetaModel,
) -> None:
self._b0 = b0
self._alpha = alpha
self._sigma2 = sigma2
self._one_step = one_step
self._nobs = model.endog_orig.shape[0]
self._model = model
self._seasonal = seasonal
self._use_mle = use_mle
@property
def params(self) -> pd.Series:
"""The forecasting model parameters"""
return pd.Series([self._b0, self._alpha], index=["b0", "alpha"])
@property
def sigma2(self) -> float:
"""The estimated residual variance"""
if self._sigma2 is None:
mod = SARIMAX(self.model._y, order=(0, 1, 1), trend="c")
res = mod.fit(disp=False)
self._sigma2 = np.asarray(res.params)[-1]
assert self._sigma2 is not None
return self._sigma2
@property
def model(self) -> ThetaModel:
"""The model used to produce the results"""
return self._model
def forecast(self, steps: int = 1, theta: float = 2) -> pd.Series:
r"""
Forecast the model for a given theta
Parameters
----------
steps : int
The number of steps ahead to compute the forecast components.
theta : float
The theta value to use when computing the weight to combine
the trend and the SES forecasts.
Returns
-------
Series
A Series containing the forecasts
Notes
-----
The forecast is computed as
.. math::
\hat{X}_{T+h|T} = \frac{\theta-1}{\theta} b_0
\left[h - 1 + \frac{1}{\alpha}
- \frac{(1-\alpha)^T}{\alpha} \right]
+ \tilde{X}_{T+h|T}
where :math:`\tilde{X}_{T+h|T}` is the SES forecast of the endogenous
variable using the parameter :math:`\alpha`. :math:`b_0` is the
slope of a time trend line fitted to X using the terms 0, 1, ..., T-1.
This expression follows from [1]_ and [2]_ when the combination
weights are restricted to be (theta-1)/theta and 1/theta. This nests
the original implementation when theta=2 and the two weights are both
1/2.
References
----------
.. [1] Hyndman, R. J., & Billah, B. (2003). Unmasking the Theta method.
International Journal of Forecasting, 19(2), 287-290.
.. [2] Fioruci, J. A., Pellegrini, T. R., Louzada, F., & Petropoulos,
F. (2015). The optimized theta method. arXiv preprint
arXiv:1503.03529.
"""
steps = int_like(steps, "steps")
if steps < 1:
raise ValueError("steps must be a positive integer")
theta = float_like(theta, "theta")
if theta < 1:
raise ValueError("theta must be a float >= 1")
thresh = 4.0 / np.finfo(np.double).eps
trend_weight = (theta - 1) / theta if theta < thresh else 1.0
comp = self.forecast_components(steps=steps)
fcast = trend_weight * comp.trend + np.asarray(comp.ses)
# Re-seasonalize if needed
if self.model.deseasonalize:
seasonal = np.asarray(comp.seasonal)
if self.model.method.startswith("mul"):
fcast *= seasonal
else:
fcast += seasonal
fcast.name = "forecast"
return fcast
def forecast_components(self, steps: int = 1) -> pd.DataFrame:
r"""
Compute the three components of the Theta model forecast
Parameters
----------
steps : int
The number of steps ahead to compute the forecast components.
Returns
-------
DataFrame
A DataFrame with three columns: trend, ses and seasonal containing
the forecast values of each of the three components.
Notes
-----
For a given value of :math:`\theta`, the deseasonalized forecast is
        `fcast = w * trend + ses` where :math:`w = \frac{\theta - 1}{\theta}`.
The reseasonalized forecasts are then `seasonal * fcast` if the
seasonality is multiplicative or `seasonal + fcast` if the seasonality
is additive.
"""
steps = int_like(steps, "steps")
if steps < 1:
raise ValueError("steps must be a positive integer")
alpha = self._alpha
b0 = self._b0
nobs = self._nobs
h = np.arange(1, steps + 1, dtype=np.float64) - 1
if alpha > 0:
h += 1 / alpha - ((1 - alpha) ** nobs / alpha)
trend = b0 * h
ses = self._one_step * np.ones(steps)
if self.model.method.startswith("add"):
season = np.zeros(steps)
else:
season = np.ones(steps)
# Re-seasonalize
if self.model.deseasonalize:
seasonal = self._seasonal
period = self.model.period
oos_idx = nobs + np.arange(1, steps + 1)
seasonal_locs = oos_idx % period
if seasonal.shape[0]:
season[:] = seasonal[seasonal_locs]
index = getattr(self.model.endog_orig, "index", None)
if index is None:
index = pd.RangeIndex(0, self.model.endog_orig.shape[0])
index = extend_index(steps, index)
df = pd.DataFrame(
{"trend": trend, "ses": ses, "seasonal": season}, index=index
)
return df
def summary(self) -> Summary:
"""
Summarize the model
Returns
-------
Summary
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
model = self.model
smry = Summary()
model_name = type(model).__name__
title = model_name + " Results"
method = "MLE" if self._use_mle else "OLS/SES"
is_series = isinstance(model.endog_orig, pd.Series)
index = getattr(model.endog_orig, "index", None)
if is_series and isinstance(index, (pd.DatetimeIndex, pd.PeriodIndex)):
sample = [index[0].strftime("%m-%d-%Y")]
sample += ["- " + index[-1].strftime("%m-%d-%Y")]
else:
sample = [str(0), str(model.endog_orig.shape[0])]
dep_name = getattr(model.endog_orig, "name", "endog") or "endog"
top_left = [
("Dep. Variable:", [dep_name]),
("Method:", [method]),
("Date:", None),
("Time:", None),
("Sample:", [sample[0]]),
("", [sample[1]]),
]
method = (
"Multiplicative" if model.method.startswith("mul") else "Additive"
)
top_right = [
("No. Observations:", [str(self._nobs)]),
("Deseasonalized:", [str(model.deseasonalize)]),
]
if model.deseasonalize:
top_right.extend(
[
("Deseas. Method:", [method]),
("Period:", [str(model.period)]),
("", [""]),
("", [""]),
]
)
else:
top_right.extend([("", [""])] * 4)
smry.add_table_2cols(
self, gleft=top_left, gright=top_right, title=title
)
table_fmt = {"data_fmts": ["%s", "%#0.4g"], "data_aligns": "r"}
data = np.asarray(self.params)[:, None]
st = SimpleTable(
data,
["Parameters", "Estimate"],
list(self.params.index),
title="Parameter Estimates",
txt_fmt=table_fmt,
)
smry.tables.append(st)
return smry
def prediction_intervals(
self, steps: int = 1, theta: float = 2, alpha: float = 0.05
) -> pd.DataFrame:
r"""
Parameters
----------
steps : int, default 1
The number of steps ahead to compute the forecast components.
theta : float, default 2
The theta value to use when computing the weight to combine
the trend and the SES forecasts.
alpha : float, default 0.05
Significance level for the confidence intervals.
Returns
-------
DataFrame
DataFrame with columns lower and upper
Notes
-----
The variance of the h-step forecast is assumed to follow from the
integrated Moving Average structure of the Theta model, and so is
        :math:`\sigma^2(1 + (h-1)(1 + (\alpha-1)^2))`. The prediction interval
assumes that innovations are normally distributed.
"""
model_alpha = self.params[1]
sigma2_h = (
1 + np.arange(steps) * (1 + (model_alpha - 1) ** 2)
) * self.sigma2
sigma_h = np.sqrt(sigma2_h)
quantile = stats.norm.ppf(alpha / 2)
predictions = self.forecast(steps, theta)
return pd.DataFrame(
{
"lower": predictions + sigma_h * quantile,
"upper": predictions + sigma_h * -quantile,
}
)
def plot_predict(
self,
steps: int = 1,
theta: float = 2,
alpha: Optional[float] = 0.05,
in_sample: bool = False,
fig: Optional["matplotlib.figure.Figure"] = None,
figsize: Tuple[float, float] = None,
) -> "matplotlib.figure.Figure":
r"""
Plot forecasts, prediction intervals and in-sample values
Parameters
----------
steps : int, default 1
The number of steps ahead to compute the forecast components.
theta : float, default 2
The theta value to use when computing the weight to combine
the trend and the SES forecasts.
alpha : {float, None}, default 0.05
The tail probability not covered by the confidence interval. Must
be in (0, 1). Confidence interval is constructed assuming normally
distributed shocks. If None, figure will not show the confidence
interval.
in_sample : bool, default False
Flag indicating whether to include the in-sample period in the
plot.
fig : Figure, default None
An existing figure handle. If not provided, a new figure is
created.
        figsize : tuple[float, float], default None
Tuple containing the figure size.
Returns
-------
Figure
Figure handle containing the plot.
Notes
-----
The variance of the h-step forecast is assumed to follow from the
integrated Moving Average structure of the Theta model, and so is
        :math:`\sigma^2(1 + (h-1)(1 + (\alpha-1)^2))`. The prediction interval
        assumes
that innovations are normally distributed.
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
assert fig is not None
predictions = self.forecast(steps, theta)
pred_index = predictions.index
ax = fig.add_subplot(111)
nobs = self.model.endog_orig.shape[0]
index = pd.Int64Index(np.arange(nobs))
if in_sample:
if isinstance(self.model.endog_orig, pd.Series):
index = self.model.endog_orig.index
ax.plot(index, self.model.endog_orig)
ax.plot(pred_index, predictions)
if alpha is not None:
pi = self.prediction_intervals(steps, theta, alpha)
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(
pred_index,
pi["lower"],
pi["upper"],
color="gray",
alpha=0.5,
label=label,
)
ax.legend(loc="best", frameon=False)
fig.tight_layout(pad=1.0)
return fig
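# A short sketch tying ``forecast_components`` to ``forecast`` using the
# weighting described above; the simulated series is an illustrative
# assumption, and ``deseasonalize=False`` keeps the combination purely
# additive so the two results can be compared directly.
def _example_theta_components():
    """Illustrative only: recombine the trend and SES components manually."""
    rng = np.random.RandomState(1)
    y = pd.Series(100 + np.cumsum(rng.standard_normal(60)))
    res = ThetaModel(y, deseasonalize=False).fit()
    comp = res.forecast_components(steps=8)
    theta = 2.0
    manual = (theta - 1) / theta * comp.trend + comp.ses
    fcast = res.forecast(steps=8, theta=theta)
    # Without reseasonalization the manual combination matches ``forecast``.
    assert np.allclose(manual.to_numpy(), fcast.to_numpy())
    return comp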
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 30 | 19128 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
print('accuracies:', accuracies)
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points,
random_state=42).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance of
    # exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # size of _fit_X == n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # original_indices_[0] now covers n_samples + n_samples_partial_fit points
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # trees_[1] now has an entry for every fitted point as well
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
rwblair/cogat | cognitive/apps/atlas/utils.py | 1 | 9276 | from datetime import datetime, date
from urllib.parse import quote
from urllib.request import urlopen
from lxml import etree
from py2neo import Path, Node, Relationship
import pandas
from django.utils.crypto import get_random_string
from cognitive.settings import graph
def generate_uid(node_type):
    '''generate_uid will generate a unique identifier for a new node, with the first three
    letters dependent on the node type
:param node_type: one of concept, battery, condition, etc.
'''
nodetypes = {
"concept": "trm",
"task": "tsk",
"theory": "thc",
"contrast": "cnt",
"battery": "tco",
"disorder": "dso",
"collection": "tco",
"condition": "con",
"implementation": "imp",
"external_dataset": "dst",
"indicator": "ind",
"citation": "cit",
"assertion": "ass",
"concept_class": "ctp",
"disambiguation": "disam",
"trait": "trt",
"behavior": "bvr"
}
# generate new node uid that doesn't exist
result = " "
while len(result) > 0:
suffix = get_random_string(13)
uid = "{}_{}".format(nodetypes.get(node_type, None), suffix)
query = """start n=node(*)
match n
where n.id = '%s'
return n.id""" %(uid)
result = do_query(query, fields=["n.id"])
return uid
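# Minimal usage sketch (illustrative only): generate_uid pairs the fixed
# three-letter prefix for the node type with a random 13-character suffix and
# retries until the id is not already present in the graph.
def _example_generate_uid():
    concept_uid = generate_uid("concept")  # e.g. "trm_" followed by 13 random characters
    task_uid = generate_uid("task")        # e.g. "tsk_..."
    return concept_uid, task_uid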
def get_relation_nodetype(relation):
'''get_relation_nodetype will return the node type for a particular relation
(eg --RELATION-->[NODE]
'''
node_types = {"HASCONDITION":"condition",
"MEASUREDBY":"contrast",
"DERIVEDFROM":"task",
"ASSERTS":"concept",
"HASCONTRAST":"contrast",
"KINDOF":"concept",
"PARTOF":"concept"}
if relation in node_types:
return node_types[relation]
return None
def add_update(field, value, updates=None):
'''add_update will update the updates dictionary only given that a value is defined (not None or '') for a field
:param field: the name of the field to update
:param value: the value to update with
:param updates: the dictionary to update (optional)
'''
if updates == None:
updates = dict()
if value not in ["", None]:
updates[field] = value
return updates
def clean_html(html, replacements=None):
    '''clean_html will replace newlines with <br> for rendering and strip \r characters
:param html: the html to clean
:param replacements: additional list of paired lists eg [["string","replace"]...] (optional)
'''
replace_sets = [["\n", "<br>"], ["\r", ""]]
if replacements != None:
replace_sets = replace_sets + replacements
for replace_set in replace_sets:
text = replace_set[0]
replacement = replace_set[1]
html = html.replace(text, replacement)
return html
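# Minimal sketch of clean_html (illustrative only; the extra replacement pair is
# a made-up example, applied after the default newline/carriage-return rules).
def _example_clean_html():
    rendered = clean_html("line one\nline two\r")            # -> "line one<br>line two"
    custom = clean_html("a--b", replacements=[["--", "-"]])  # -> "a-b"
    return rendered, custom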
def update_lookup(lookup, key, entry):
    '''update_lookup will update a lookup dictionary with an entry. If the key already exists,
    the entry is appended to the existing list; if not, a new list is created for it.
    :param lookup: dictionary to update
    :param key: the key to update under
    :param entry: the entry (or list of entries) to add to the list
'''
if not isinstance(entry, list):
entry = [entry]
if key in lookup:
lookup[key] = lookup[key] + entry
else:
lookup[key] = entry
return lookup
def merge_cypher(cypher1, cypher2):
    '''merge_cypher will combine two cypher objects, meaning dictionaries with lists of nodes and links.
    It is assumed that there are no repeated nodes.
:param cypher1: the first cypher dict
:param cypher2: the second cypher dict
'''
cypher = dict()
cypher["nodes"] = cypher1["nodes"] + cypher2["nodes"]
cypher["links"] = cypher1["links"] + cypher2["links"]
return cypher
def color_by_relation(relation_name):
'''color_by_relation returns node color based on relation type
:param relation_name: the name of the relation to look up color for
'''
colors = {"ASSERTS":"#3C7263", # task --asserts--> concept
"MEASUREDBY": "#D89013", # concept --measuredby--> contrast
"DERIVEDFROM": "#63506D", # task --derivedfrom--> task
"HASCONDITION":"#BC1079", # contrast --hascondition--> condition
"HASCONTRAST": "#D89013", # condition --hascontrast--> contrast
"PARTOF":"#3C7263", # concept
"KINDOF":"#3C7263"} # concept
if relation_name in colors:
return colors[relation_name]
return "#FFFFFF"
# Query helper functions ######################################################
def do_query(query, fields, output_format="dict", drop_duplicates=True):
''' do_query will return the result of a cypher query in the format
specified (default is dict)
    :param query: string of cypher query
    :param fields: field name or list of field names to use as column labels
    :param output_format: desired output format, one of "dict", "df" or "list". Default is "dict"
    :param drop_duplicates: if True (default), duplicate rows are dropped before returning
'''
if isinstance(fields, str):
fields = [fields]
result = graph.cypher.execute(query)
df = pandas.DataFrame(result.records, columns=result.columns)
df.columns = fields
if drop_duplicates == True:
df = df.drop_duplicates()
if output_format == "df":
return df
elif output_format == "list":
return df.values.tolist()
elif output_format == "dict":
return df.to_dict(orient="records")
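# Minimal usage sketch (illustrative only; the Cypher statement and field names
# are hypothetical): do_query runs a read-only query and reshapes the result
# according to output_format.
def _example_do_query():
    query = "MATCH (c:concept) RETURN c.id, c.name LIMIT 5"
    records = do_query(query, fields=["id", "name"])                    # list of dicts (default)
    frame = do_query(query, fields=["id", "name"], output_format="df")  # pandas DataFrame
    return records, frame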
def do_transaction(tx=None, query=None, params=None):
    ''' do_transaction will commit a cypher transaction and return the result
    as a pandas DataFrame (or None if nothing was returned). If a transaction
    object is not supplied, query must be defined, and the function will call
    get_transactions first. If tx is defined and query is also defined,
    the query will be added to the transaction before running it.
    :param tx: an existing transaction object (optional); if omitted, query must
        be provided and a new transaction is built with get_transactions
    :param query: string of cypher query (optional); if provided, it is appended
        to the transaction via get_transactions before committing
:param params: a list of dictionaries, each dictionary with keys as values
to sub in the query, and values as the thing to substitute.
Eg: [{"A":name,"B":classification}]
'''
if tx is None and query is None:
print("Please define either transaction or query.")
return None
if query != None:
tx = get_transactions(query, tx=tx, params=params)
    # Commit the transaction and collect the results
results = tx.commit()
if not results or sum(len(res) for res in results) == 0:
return None
# Return as pandas Data Frame
column_names = [x.split(".")[-1] for x in results[0].columns]
df = pandas.DataFrame(columns=column_names)
for r in range(len(results)):
df.loc[r] = [x for x in results[r].one]
return df
def get_transactions(query, tx=None, params=None):
    '''get_transactions will append the query to an existing transaction object, or create and return a new transaction if one does not exist.
:param query: string of cypher query
:param tx: a transaction object (optional)
:param params: a list of dictionaries, each dictionary with keys as values to sub in the query, and values as the thing to substitute. Eg: [{"A":name,"B":classification}]
'''
# Combine queries into transaction
if tx is None:
tx = graph.cypher.begin()
if params:
for param in params:
tx.append(query, param)
else:
tx.append(query)
return tx
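# Minimal sketch of batching parameterised statements into one transaction
# (illustrative only; the Cypher statement and parameter values are hypothetical).
def _example_do_transaction():
    query = "MATCH (c:concept) WHERE c.id = {A} SET c.name = {B} RETURN c.id, c.name"
    params = [{"A": "trm_1", "B": "working memory"},
              {"A": "trm_2", "B": "response inhibition"}]
    tx = get_transactions(query, params=params)  # one appended statement per parameter set
    return do_transaction(tx=tx)                 # commits and returns a pandas DataFrame (or None)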
class InvalidDoiException(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_paper_properties(doi):
xmlurl = 'http://doi.crossref.org/servlet/query'
xmlpath = xmlurl + '[email protected]&format=unixref&id=' + quote(doi)
print(xmlpath)
xml_str = urlopen(xmlpath).read()
doc = etree.fromstring(xml_str)
if len(doc.getchildren()) == 0 or len(doc.findall('.//crossref/error')) > 0:
raise InvalidDoiException("DOI %s was not found" % doi)
journal_name = doc.findall(".//journal/journal_metadata/full_title")[0].text
title = doc.findall('.//title')[0].text
authors = [author.findall('given_name')[0].text + " " + author.findall('surname')[0].text
for author in doc.findall('.//contributors/person_name')]
if len(authors) > 1:
authors = ", ".join(authors[:-1]) + " and " + authors[-1]
else:
authors = authors[0]
url = doc.findall('.//doi_data/resource')[0].text
date_node = doc.findall('.//publication_date')[0]
if len(date_node.findall('day')) > 0:
publication_date = date(int(date_node.findall('year')[0].text),
int(date_node.findall('month')[0].text),
int(date_node.findall('day')[0].text))
elif len(date_node.findall('month')) > 0:
publication_date = date(int(date_node.findall('year')[0].text),
int(date_node.findall('month')[0].text),
1)
else:
publication_date = date(int(date_node.findall('year')[0].text),
1,
1)
return title, authors, url, publication_date, journal_name
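# Minimal usage sketch (illustrative only): get_paper_properties performs a live
# CrossRef lookup, so this needs network access and the DOI below is only a placeholder.
def _example_get_paper_properties():
    try:
        title, authors, url, pub_date, journal = get_paper_properties("10.1000/xyz123")
    except InvalidDoiException:
        return None
    return title, authors, url, pub_date, journal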
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
""" If rcParams['backend_fallback'] is true, check to see if the
current backend is compatible with the current running event
loop, and if not switches to a compatible one.
"""
backend = rcParams['backend']
if not rcParams['backend_fallback'] or \
backend not in _interactive_bk:
return
is_agg_backend = rcParams['backend'].endswith('Agg')
if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
import wx
if wx.App.IsMainLoopRunning():
rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
elif 'qt' in sys.modules and not backend == 'QtAgg':
import qt
if not qt.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qtAgg'
elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
import PyQt4.QtGui
if not PyQt4.QtGui.qApp.startingUp():
# The mainloop is running.
rcParams['backend'] = 'qt4Agg'
elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
'GTKCairo'):
import gobject
if gobject.MainLoop().is_running():
rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
#import Tkinter
pass #what if anything do we need to do for tkinter?
_backend_selection()
## Global ##
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
if o is None:
o = gcf()
return o.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
"""
Switch the default backend to newbackend. This feature is
**experimental**, and is only expected to work switching to an
image backend. Eg, if you have a bunch of PostScript scripts that
you want to run from an interactive ipython session, you may want
to switch to the PS backend before running them to avoid having a
bunch of GUI windows popup. If you try to interactively switch
from one GUI backend to another, you will explode.
Calling this command will close all open windows.
"""
close('all')
global new_figure_manager, draw_if_interactive, show
matplotlib.use(newbackend, warn=False)
reload(matplotlib.backends)
from matplotlib.backends import pylab_setup
new_figure_manager, draw_if_interactive, show = pylab_setup()
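# Minimal sketch of the script-oriented use described above (illustrative only,
# not part of the pyplot API): switch to the non-GUI Agg backend, draw, and save
# to a hypothetical filename without opening any windows.
def _example_switch_backend():
    switch_backend('Agg')
    plot([1, 2, 3])
    savefig('example_output.png')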
def isinteractive():
"""
Return the interactive status
"""
return matplotlib.is_interactive()
def ioff():
'Turn interactive mode off.'
matplotlib.interactive(False)
def ion():
'Turn interactive mode on.'
matplotlib.interactive(True)
def rc(*args, **kwargs):
matplotlib.rc(*args, **kwargs)
if matplotlib.rc.__doc__ is not None:
rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
matplotlib.rcdefaults()
draw_if_interactive()
if matplotlib.rcdefaults.__doc__ is not None:
rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
"""
Get the current :class:`~matplotlib.cm.ScalarMappable` instance
(image or patch collection), or *None* if no images or patch
collections have been defined. The commands
:func:`~matplotlib.pyplot.imshow` and
:func:`~matplotlib.pyplot.figimage` create
:class:`~matplotlib.image.Image` instances, and the commands
:func:`~matplotlib.pyplot.pcolor` and
:func:`~matplotlib.pyplot.scatter` create
:class:`~matplotlib.collections.Collection` instances.
"""
return gci._current
gci._current = None
def sci(im):
"""
Set the current image (target of colormap commands like
:func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
:func:`~matplotlib.pyplot.clim`).
"""
gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
ret = _setp(*args, **kwargs)
draw_if_interactive()
return ret
if _setp.__doc__ is not None:
setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
figsize = None, # defaults to rc figure.figsize
dpi = None, # defaults to rc figure.dpi
facecolor = None, # defaults to rc figure.facecolor
edgecolor = None, # defaults to rc figure.edgecolor
frameon = True,
FigureClass = Figure,
**kwargs
):
"""
call signature::
figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')
Create a new figure and return a :class:`matplotlib.figure.Figure`
instance. If *num* = *None*, the figure number will be incremented and
a new figure will be created. The returned figure objects have a
*number* attribute holding this number.
If *num* is an integer, and ``figure(num)`` already exists, make it
active and return the handle to it. If ``figure(num)`` does not exist
it will be created. Numbering starts at 1, matlab style::
figure(1)
If you are creating many figures, make sure you explicitly call "close"
on the figures you are not using, because this will enable pylab
to properly clean up the memory.
Optional keyword arguments:
========= =======================================================
Keyword Description
========= =======================================================
figsize width x height in inches; defaults to rc figure.figsize
dpi resolution; defaults to rc figure.dpi
facecolor the background color; defaults to rc figure.facecolor
edgecolor the border color; defaults to rc figure.edgecolor
========= =======================================================
rcParams defines the default values, which can be modified in the
matplotlibrc file
*FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
class that will be passed on to :meth:`new_figure_manager` in the
backends which allows you to hook custom Figure classes into the
pylab interface. Additional kwargs will be passed on to your
figure init function.
"""
if figsize is None : figsize = rcParams['figure.figsize']
if dpi is None : dpi = rcParams['figure.dpi']
if facecolor is None : facecolor = rcParams['figure.facecolor']
if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
if num is None:
allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
if allnums:
num = max(allnums) + 1
else:
num = 1
else:
num = int(num) # crude validation of num argument
figManager = _pylab_helpers.Gcf.get_fig_manager(num)
if figManager is None:
if get_backend().lower() == 'ps': dpi = 72
figManager = new_figure_manager(num, figsize=figsize,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
frameon=frameon,
FigureClass=FigureClass,
**kwargs)
# make this figure current on button press event
def make_active(event):
_pylab_helpers.Gcf.set_active(figManager)
cid = figManager.canvas.mpl_connect('button_press_event', make_active)
figManager._cidgcf = cid
_pylab_helpers.Gcf.set_active(figManager)
figManager.canvas.figure.number = num
draw_if_interactive()
return figManager.canvas.figure
def gcf():
"Return a handle to the current figure."
figManager = _pylab_helpers.Gcf.get_active()
if figManager is not None:
return figManager.canvas.figure
else:
return figure()
def get_current_fig_manager():
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None:
gcf() # creates an active figure as a side effect
figManager = _pylab_helpers.Gcf.get_active()
return figManager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
return get_current_fig_manager().canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
return get_current_fig_manager().canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
"""
Close a figure window
``close()`` by itself closes the current figure
``close(num)`` closes figure number *num*
``close(h)`` where *h* is a :class:`Figure` instance, closes that figure
``close('all')`` closes all the figure windows
"""
if len(args)==0:
figManager = _pylab_helpers.Gcf.get_active()
if figManager is None: return
else:
figManager.canvas.mpl_disconnect(figManager._cidgcf)
_pylab_helpers.Gcf.destroy(figManager.num)
elif len(args)==1:
arg = args[0]
if arg=='all':
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
elif isinstance(arg, int):
_pylab_helpers.Gcf.destroy(arg)
elif isinstance(arg, Figure):
for manager in _pylab_helpers.Gcf.get_all_fig_managers():
if manager.canvas.figure==arg:
manager.canvas.mpl_disconnect(manager._cidgcf)
_pylab_helpers.Gcf.destroy(manager.num)
else:
raise TypeError('Unrecognized argument type %s to close'%type(arg))
else:
raise TypeError('close takes 0 or 1 arguments')
def clf():
"""
Clear the current figure
"""
gcf().clf()
draw_if_interactive()
def draw():
'redraw the current figure'
get_current_fig_manager().canvas.draw()
def savefig(*args, **kwargs):
fig = gcf()
return fig.savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* clicks from the user and return a list of the
coordinates of each click.
If *timeout* is negative, does not timeout.
"""
return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
"""
Blocking call to interact with the figure.
This will wait for *n* key or mouse clicks from the user and
return a list containing True's for keyboard clicks and False's
for mouse clicks.
If *timeout* is negative, does not timeout.
"""
return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
ret = gcf().text(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.text.__doc__ is not None:
figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
ret = gcf().suptitle(*args, **kwargs)
draw_if_interactive()
return ret
if Figure.suptitle.__doc__ is not None:
suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
ret = gcf().figimage(*args, **kwargs)
draw_if_interactive()
gci._current = ret
return ret
if Figure.figimage.__doc__ is not None:
figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
"""
Place a legend in the figure.
*labels*
a sequence of strings
*handles*
a sequence of :class:`~matplotlib.lines.Line2D` or
:class:`~matplotlib.patches.Patch` instances
*loc*
can be a string or an integer specifying the legend
location
A :class:`matplotlib.legend.Legend` instance is returned.
Example::
figlegend( (line1, line2, line3),
('label1', 'label2', 'label3'),
'upper right' )
.. seealso::
:func:`~matplotlib.pyplot.legend`:
For information about the location codes
"""
l = gcf().legend(handles, labels, loc, **kwargs)
draw_if_interactive()
return l
## Figure and Axes hybrid ##
def hold(b=None):
"""
Set the hold state. If *b* is None (default), toggle the
hold state, else set the hold state to boolean value *b*::
hold() # toggle hold
hold(True) # hold is on
hold(False) # hold is off
When *hold* is *True*, subsequent plot commands will be added to
the current axes. When *hold* is *False*, the current axes and
figure will be cleared on the next plot command.
"""
fig = gcf()
ax = fig.gca()
fig.hold(b)
ax.hold(b)
    # b=None toggles the hold state, so let's get the current hold
# state; but should pyplot hold toggle the rc setting - me thinks
# not
b = ax.ishold()
rc('axes', hold=b)
def ishold():
"""
Return the hold status of the current axes
"""
return gca().ishold()
def over(func, *args, **kwargs):
"""
over calls::
func(*args, **kwargs)
with ``hold(True)`` and then restores the hold state.
"""
h = ishold()
hold(True)
func(*args, **kwargs)
hold(h)
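# Minimal sketch of over() (illustrative only): the second call is layered onto
# the current axes because hold is forced on, and the previous hold state is
# restored afterwards.
def _example_over():
    plot([0, 1, 2])
    over(plot, [2, 1, 0], linestyle='--')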
## Axes ##
def axes(*args, **kwargs):
"""
Add an axes at position rect specified by:
- ``axes()`` by itself creates a default full ``subplot(111)`` window axis.
- ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
height] in normalized (0, 1) units. *axisbg* is the background
color for the axis, default white.
- ``axes(h)`` where *h* is an axes instance makes *h* the current
axis. An :class:`~matplotlib.axes.Axes` instance is returned.
======= ============ ================================================
      kwarg   Accepts      Description
======= ============ ================================================
axisbg color the axes background color
frameon [True|False] display the frame?
sharex otherax current axes shares xaxis attribute with otherax
sharey otherax current axes shares yaxis attribute with otherax
polar [True|False] use a polar axes?
======= ============ ================================================
Examples:
* :file:`examples/pylab_examples/axes_demo.py` places custom axes.
* :file:`examples/pylab_examples/shared_axis_demo.py` uses
*sharex* and *sharey*.
"""
nargs = len(args)
if len(args)==0: return subplot(111, **kwargs)
if nargs>1:
raise TypeError('Only one non keyword arg to axes allowed')
arg = args[0]
if isinstance(arg, Axes):
a = gcf().sca(arg)
else:
rect = arg
a = gcf().add_axes(rect, **kwargs)
draw_if_interactive()
return a
def delaxes(*args):
"""
``delaxes(ax)``: remove *ax* from the current figure. If *ax*
doesn't exist, an error will be raised.
``delaxes()``: delete the current axes
"""
if not len(args):
ax = gca()
else:
ax = args[0]
ret = gcf().delaxes(ax)
draw_if_interactive()
return ret
def gca(**kwargs):
"""
Return the current axis instance. This can be used to control
axis properties either using set or the
:class:`~matplotlib.axes.Axes` methods, for example, setting the
xaxis range::
plot(t,s)
set(gca(), 'xlim', [0,10])
or::
plot(t,s)
a = gca()
a.set_xlim([0,10])
"""
ax = gcf().gca(**kwargs)
return ax
# More ways of creating axes:
def subplot(*args, **kwargs):
"""
Create a subplot command, creating axes with::
subplot(numRows, numCols, plotNum)
where *plotNum* = 1 is the first plot number and increasing *plotNums*
fill rows first. max(*plotNum*) == *numRows* * *numCols*
You can leave out the commas if *numRows* <= *numCols* <=
*plotNum* < 10, as in::
subplot(211) # 2 rows, 1 column, first (upper) plot
``subplot(111)`` is the default axis.
New subplots that overlap old will delete the old axes. If you do
not want this behavior, use
:meth:`matplotlib.figure.Figure.add_subplot` or the
:func:`~matplotlib.pyplot.axes` command. Eg.::
from pylab import *
plot([1,2,3]) # implicitly creates subplot(111)
subplot(211) # overlaps, subplot(111) is killed
plot(rand(12), rand(12))
subplot(212, axisbg='y') # creates 2nd subplot with yellow background
Keyword arguments:
*axisbg*:
The background color of the subplot, which can be any valid
color specifier. See :mod:`matplotlib.colors` for more
information.
*polar*:
A boolean flag indicating whether the subplot plot should be
a polar projection. Defaults to False.
*projection*:
A string giving the name of a custom projection to be used
for the subplot. This projection must have been previously
registered. See :func:`matplotlib.projections.register_projection`
.. seealso::
:func:`~matplotlib.pyplot.axes`:
For additional information on :func:`axes` and
:func:`subplot` keyword arguments.
:file:`examples/pylab_examples/polar_scatter.py`
**Example:**
.. plot:: mpl_examples/pylab_examples/subplot_demo.py
"""
fig = gcf()
a = fig.add_subplot(*args, **kwargs)
bbox = a.bbox
byebye = []
for other in fig.axes:
if other==a: continue
if bbox.fully_overlaps(other.bbox):
byebye.append(other)
for ax in byebye: delaxes(ax)
draw_if_interactive()
return a
def twinx(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the xaxis. The ticks for *ax2* will be placed on
the right, and the *ax2* instance is returned.
.. seealso::
:file:`examples/api_examples/two_scales.py`
"""
if ax is None:
ax=gca()
ax1 = ax.twinx()
draw_if_interactive()
return ax1
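# Minimal two-scale sketch using twinx (illustrative only; the data is made up):
# both curves share the x axis, the second uses a separate y axis on the right.
def _example_twinx():
    ax1 = gca()
    ax1.plot([0, 1, 2], [0, 10, 20], 'b-')
    ax2 = twinx()                            # shares ax1's x axis, ticks on the right
    ax2.plot([0, 1, 2], [100, 50, 0], 'r--')
    return ax1, ax2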
def twiny(ax=None):
"""
Make a second axes overlay *ax* (or the current axes if *ax* is
*None*) sharing the yaxis. The ticks for *ax2* will be placed on
the top, and the *ax2* instance is returned.
"""
if ax is None:
ax=gca()
ax1 = ax.twiny()
draw_if_interactive()
return ax1
def subplots_adjust(*args, **kwargs):
"""
call signature::
subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=None)
Tune the subplot layout via the
:class:`matplotlib.figure.SubplotParams` mechanism. The parameter
meanings (and suggested defaults) are::
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
The actual defaults are controlled by the rc file
"""
fig = gcf()
fig.subplots_adjust(*args, **kwargs)
draw_if_interactive()
def subplot_tool(targetfig=None):
"""
Launch a subplot tool window for *targetfig* (default gcf).
A :class:`matplotlib.widgets.SubplotTool` instance is returned.
"""
tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
rcParams['toolbar'] = 'None'
if targetfig is None:
manager = get_current_fig_manager()
targetfig = manager.canvas.figure
else:
# find the manager for this figure
for manager in _pylab_helpers.Gcf._activeQue:
if manager.canvas.figure==targetfig: break
else: raise RuntimeError('Could not find manager for targetfig')
toolfig = figure(figsize=(6,3))
toolfig.subplots_adjust(top=0.9)
ret = SubplotTool(targetfig, toolfig)
rcParams['toolbar'] = tbar
_pylab_helpers.Gcf.set_active(manager) # restore the current figure
return ret
def box(on=None):
"""
Turn the axes box on or off according to *on*.
If *on* is *None*, toggle state.
"""
ax = gca()
if on is None:
on = not ax.get_frame_on()
ax.set_frame_on(on)
draw_if_interactive()
def title(s, *args, **kwargs):
"""
Set the title of the current axis to *s*.
Default font override is::
override = {'fontsize': 'medium',
'verticalalignment': 'bottom',
'horizontalalignment': 'center'}
.. seealso::
:func:`~matplotlib.pyplot.text`:
for information on how override and the optional args work.
"""
l = gca().set_title(s, *args, **kwargs)
draw_if_interactive()
return l
## Axis ##
def axis(*v, **kwargs):
"""
Set/Get the axis properties:
>>> axis()
returns the current axes limits ``[xmin, xmax, ymin, ymax]``.
>>> axis(v)
sets the min and max of the x and y axes, with
``v = [xmin, xmax, ymin, ymax]``.
>>> axis('off')
turns off the axis lines and labels.
>>> axis('equal')
changes limits of *x* or *y* axis so that equal increments of *x*
and *y* have the same length; a circle is circular.
>>> axis('scaled')
achieves the same result by changing the dimensions of the plot box instead
of the axis data limits.
>>> axis('tight')
changes *x* and *y* axis limits such that all data is shown. If
all data is already shown, it will move it to the center of the
figure without modifying (*xmax* - *xmin*) or (*ymax* -
*ymin*). Note this is slightly different than in matlab.
>>> axis('image')
is 'scaled' with the axis limits equal to the data limits.
>>> axis('auto')
and
>>> axis('normal')
are deprecated. They restore default behavior; axis limits are automatically
scaled to make the data fit comfortably within the plot box.
if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
as kwargs selectively to alter just those limits without changing
the others.
The xmin, xmax, ymin, ymax tuple is returned
.. seealso::
:func:`xlim`, :func:`ylim`
"""
ax = gca()
v = ax.axis(*v, **kwargs)
draw_if_interactive()
return v
def xlabel(s, *args, **kwargs):
"""
Set the *x* axis label of the current axis to *s*
Default override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'top',
'horizontalalignment' : 'center'
}
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args work
"""
l = gca().set_xlabel(s, *args, **kwargs)
draw_if_interactive()
return l
def ylabel(s, *args, **kwargs):
"""
Set the *y* axis label of the current axis to *s*.
Defaults override is::
override = {
'fontsize' : 'small',
'verticalalignment' : 'center',
'horizontalalignment' : 'right',
        'rotation' : 'vertical' }
.. seealso::
:func:`~matplotlib.pyplot.text`:
For information on how override and the optional args
work.
"""
l = gca().set_ylabel(s, *args, **kwargs)
draw_if_interactive()
return l
def xlim(*args, **kwargs):
"""
Set/Get the xlimits of the current axes::
xmin, xmax = xlim() # return the current xlim
xlim( (xmin, xmax) ) # set the xlim to xmin, xmax
xlim( xmin, xmax ) # set the xlim to xmin, xmax
If you do not specify args, you can pass the xmin and xmax as
kwargs, eg.::
xlim(xmax=3) # adjust the max leaving min unchanged
xlim(xmin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_xlim(*args, **kwargs)
draw_if_interactive()
return ret
def ylim(*args, **kwargs):
"""
Set/Get the ylimits of the current axes::
ymin, ymax = ylim() # return the current ylim
ylim( (ymin, ymax) ) # set the ylim to ymin, ymax
ylim( ymin, ymax ) # set the ylim to ymin, ymax
If you do not specify args, you can pass the *ymin* and *ymax* as
kwargs, eg.::
ylim(ymax=3) # adjust the max leaving min unchanged
ylim(ymin=1) # adjust the min leaving max unchanged
The new axis limits are returned as a length 2 tuple.
"""
ax = gca()
ret = ax.set_ylim(*args, **kwargs)
draw_if_interactive()
return ret
def xscale(*args, **kwargs):
"""
call signature::
xscale(scale, **kwargs)
Set the scaling for the x-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_xscale(*args, **kwargs)
draw_if_interactive()
return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
"""
call signature::
      yscale(scale, **kwargs)
Set the scaling for the y-axis: %(scale)s
Different keywords may be accepted, depending on the scale:
%(scale_docs)s
"""
ax = gca()
ret = ax.set_yscale(*args, **kwargs)
draw_if_interactive()
return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
"""
Set/Get the xlimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = xticks()
# set the locations of the xticks
xticks( arange(6) )
# set the locations and labels of the xticks
xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_xticks()
labels = ax.get_xticklabels()
elif len(args)==1:
locs = ax.set_xticks(args[0])
labels = ax.get_xticklabels()
elif len(args)==2:
locs = ax.set_xticks(args[0])
labels = ax.set_xticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to xticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
"""
Set/Get the ylimits of the current ticklocs and labels::
# return locs, labels where locs is an array of tick locations and
# labels is an array of tick labels.
locs, labels = yticks()
# set the locations of the yticks
yticks( arange(6) )
# set the locations and labels of the yticks
yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )
The keyword args, if any, are :class:`~matplotlib.text.Text`
properties.
"""
ax = gca()
if len(args)==0:
locs = ax.get_yticks()
labels = ax.get_yticklabels()
elif len(args)==1:
locs = ax.set_yticks(args[0])
labels = ax.get_yticklabels()
elif len(args)==2:
locs = ax.set_yticks(args[0])
labels = ax.set_yticklabels(args[1], **kwargs)
else: raise TypeError('Illegal number of arguments to yticks')
if len(kwargs):
for l in labels:
l.update(kwargs)
draw_if_interactive()
return ( locs,
silent_list('Text yticklabel', labels)
)
def rgrids(*args, **kwargs):
"""
Set/Get the radial locations of the gridlines and ticklabels on a
polar plot.
call signatures::
lines, labels = rgrids()
lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)
When called with no arguments, :func:`rgrid` simply returns the
tuple (*lines*, *labels*), where *lines* is an array of radial
gridlines (:class:`~matplotlib.lines.Line2D` instances) and
*labels* is an array of tick labels
(:class:`~matplotlib.text.Text` instances). When called with
arguments, the labels will appear at the specified radial
distances and angles.
*labels*, if not *None*, is a len(*radii*) list of strings of the
labels to use at each angle.
If *labels* is None, the rformatter will be used
Examples::
# set the locations of the radial gridlines and labels
lines, labels = rgrids( (0.25, 0.5, 1.0) )
# set the locations and labels of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
raise RuntimeError('rgrids only defined for polar axes')
if len(args)==0:
lines = ax.yaxis.get_ticklines()
labels = ax.yaxis.get_ticklabels()
else:
lines, labels = ax.set_rgrids(*args, **kwargs)
draw_if_interactive()
return ( silent_list('Line2D rgridline', lines),
silent_list('Text rgridlabel', labels) )
def thetagrids(*args, **kwargs):
"""
Set/Get the theta locations of the gridlines and ticklabels.
If no arguments are passed, return a tuple (*lines*, *labels*)
where *lines* is an array of radial gridlines
(:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
array of tick labels (:class:`~matplotlib.text.Text` instances)::
lines, labels = thetagrids()
Otherwise the syntax is::
lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)
set the angles at which to place the theta grids (these gridlines
are equal along the theta dimension).
*angles* is in degrees.
*labels*, if not *None*, is a len(angles) list of strings of the
labels to use at each angle.
If *labels* is *None*, the labels will be ``fmt%angle``.
*frac* is the fraction of the polar axes radius at which to place
the label (1 is the edge). Eg. 1.05 is outside the axes and 0.95
is inside the axes.
Return value is a list of tuples (*lines*, *labels*):
- *lines* are :class:`~matplotlib.lines.Line2D` instances
- *labels* are :class:`~matplotlib.text.Text` instances.
Note that on input, the *labels* argument is a list of strings,
and on output it is a list of :class:`~matplotlib.text.Text`
instances.
Examples::
# set the locations of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90) )
# set the locations and labels of the radial gridlines and labels
lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
"""
ax = gca()
if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
if len(args)==0:
lines = ax.xaxis.get_ticklines()
labels = ax.xaxis.get_ticklabels()
else:
lines, labels = ax.set_thetagrids(*args, **kwargs)
draw_if_interactive()
return (silent_list('Line2D thetagridline', lines),
silent_list('Text thetagridlabel', labels)
)
## Plotting Info ##
def plotting():
"""
Plotting commands
=============== =========================================================
Command Description
=============== =========================================================
axes Create a new axes
axis Set or return the current axis limits
bar make a bar chart
boxplot make a box and whiskers chart
cla clear current axes
clabel label a contour plot
clf clear a figure window
close close a figure window
colorbar add a colorbar to the current figure
cohere make a plot of coherence
contour make a contour plot
contourf make a filled contour plot
csd make a plot of cross spectral density
draw force a redraw of the current figure
errorbar make an errorbar graph
figlegend add a legend to the figure
figimage add an image to the figure, w/o resampling
figtext add text in figure coords
figure create or change active figure
fill make filled polygons
fill_between make filled polygons
gca return the current axes
gcf return the current figure
gci get the current image, or None
getp get a handle graphics property
hist make a histogram
hold set the hold state on current axes
legend add a legend to the axes
loglog a log log plot
imread load image file into array
imshow plot image data
matshow display a matrix in a new figure preserving aspect
pcolor make a pseudocolor plot
plot make a line plot
plotfile plot data from a flat file
psd make a plot of power spectral density
quiver make a direction field (arrows) plot
rc control the default params
savefig save the current figure
scatter make a scatter plot
setp set a handle graphics property
semilogx log x axis
semilogy log y axis
show show the figures
specgram a spectrogram plot
stem make a stem plot
subplot make a subplot (numrows, numcols, axesnum)
table add a table to the axes
text add some text at location x,y to the current axes
title add a title to the current axes
xlabel add an xlabel to the current axes
ylabel add a ylabel to the current axes
=============== =========================================================
The following commands will set the default colormap accordingly:
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
"""
pass
def get_plot_commands(): return ( 'axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold', 'imread',
'imshow', 'legend', 'loglog', 'quiver', 'rc', 'pcolor', 'pcolormesh', 'plot', 'psd',
'savefig', 'scatter', 'set', 'semilogx', 'semilogy', 'show',
'specgram', 'stem', 'subplot', 'table', 'text', 'title', 'xlabel',
'ylabel', 'pie', 'polar')
def colors():
"""
This is a do nothing function to provide you with help on how
matplotlib handles colors.
Commands which take color arguments can use several formats to
specify the colors. For the basic builtin colors, you can use a
single letter
===== =======
Alias Color
===== =======
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
===== =======
For a greater range of colors, you have two options. You can
specify the color using an html hex string, as in::
color = '#eeefff'
or you can pass an R,G,B tuple, where each of R,G,B are in the
range [0,1].
You can also use any legal html name for a color, for example::
color = 'red',
color = 'burlywood'
color = 'chartreuse'
The example below creates a subplot with a dark
slate gray background
subplot(111, axisbg=(0.1843, 0.3098, 0.3098))
    Here is an example that creates a pale turquoise title::
title('Is this the best color?', color='#afeeee')
"""
pass
def colormaps():
"""
matplotlib provides the following colormaps.
* autumn
* bone
* cool
* copper
* flag
* gray
* hot
* hsv
* jet
* pink
* prism
* spring
* summer
* winter
* spectral
You can set the colormap for an image, pcolor, scatter, etc,
either as a keyword argument::
imshow(X, cmap=cm.hot)
or post-hoc using the corresponding pylab interface function::
imshow(X)
hot()
jet()
In interactive mode, this will update the colormap allowing you to
see which one works best for your data.
"""
pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
if mappable is None:
mappable = gci()
if ax is None:
ax = gca()
ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw)
draw_if_interactive()
return ret
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
"""
Set the color limits of the current image
To apply clim to all axes images do::
clim(0, 0.5)
If either *vmin* or *vmax* is None, the image min/max respectively
will be used for color scaling.
If you want to set the clim of multiple images,
use, for example::
for im in gca().get_images():
im.set_clim(0, 0.05)
"""
im = gci()
if im is None:
raise RuntimeError('You must first define an image, eg with imshow')
im.set_clim(vmin, vmax)
draw_if_interactive()
def imread(*args, **kwargs):
return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
"""
Display an array as a matrix in a new figure window.
The origin is set at the upper left hand corner and rows (first
dimension of the array) are displayed horizontally. The aspect
ratio of the figure window is that of the array, unless this would
make an excessively short or narrow figure.
Tick labels for the xaxis are placed on top.
With the exception of fignum, keyword arguments are passed to
:func:`~matplotlib.pyplot.imshow`.
*fignum*: [ None | integer | False ]
By default, :func:`matshow` creates a new figure window with
automatic numbering. If *fignum* is given as an integer, the
created figure will use this figure number. Because of how
:func:`matshow` tries to set the figure aspect ratio to be the
one of the array, if you provide the number of an already
existing figure, strange things may happen.
If *fignum* is *False* or 0, a new figure window will **NOT** be created.
"""
    if fignum is False or fignum == 0:
ax = gca()
else:
# Extract actual aspect ratio of array and make appropriately sized figure
fig = figure(fignum, figsize=figaspect(A))
ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
im = ax.matshow(A, **kw)
gci._current = im
draw_if_interactive()
return im
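# Minimal matshow sketch (illustrative only): display a small matrix with the
# origin at the upper left and the x tick labels along the top.
def _example_matshow():
    import numpy as np   # local import so the sketch is self-contained
    A = np.arange(12).reshape(3, 4)
    return matshow(A)    # opens a new figure sized to the 3x4 aspect ratio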
def polar(*args, **kwargs):
"""
call signature::
polar(theta, r, **kwargs)
Make a polar plot. Multiple *theta*, *r* arguments are supported,
with format strings, as in :func:`~matplotlib.pyplot.plot`.
"""
ax = gca(polar=True)
ret = ax.plot(*args, **kwargs)
draw_if_interactive()
return ret
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
**kwargs):
"""
Plot the data in *fname*
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, eg., integer
column numbers in both or column names in both.
*comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
:func:`matplotlib.pylab.csv2rec` to load the data into a record array.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
"""
fig = figure()
if len(cols)<1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = dict()
r = mlab.csv2rec(fname, comments=comments,
skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
def getname_val(identifier):
'return the name and column data for identifier'
if is_string_like(identifier):
return identifier, r[identifier]
elif is_numlike(identifier):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
if len(cols)==1:
ax1 = fig.add_subplot(1,1,1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_xlabel(xname)
else:
N = len(cols)
for i in range(1,N):
if i==1:
ax = ax1 = fig.add_subplot(N-1,1,i)
ax.grid(True)
else:
ax = fig.add_subplot(N-1,1,i, sharex=ax1)
ax.grid(True)
yname, y = getname_val(cols[i])
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if xname=='date':
fig.autofmt_xdate()
draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().acorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.acorr.__doc__ is not None:
acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().arrow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.arrow.__doc__ is not None:
arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhline.__doc__ is not None:
axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| agpl-3.0 |
Radymus/QMetric | setup.py | 1 | 1166 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Author: "Radim Spigel" <[email protected]>
"""
Created on Tue Apr 1 20:30:40 2014
@author: Radim Spigel
"""
from setuptools import setup
version_string = '0.1.0'
description = (
"This is project is for evaluate the hypothetical "
"quality of the projects written in python.")
setup_kwargs = {
'name': 'QMetric',
    'description': 'Evaluation of the hypothetical quality of Python projects.',
'keywords': ['git', 'quality', 'python', 'gittle'],
'version': version_string,
'url': 'https://github.com/Radymus/QMetric',
'license': 'GPLv3',
'author': "Radim Spigel",
'author_email': '[email protected]',
'long_description': description,
'packages': [],
'scripts': ['QMetric.py'],
'install_requires': [
'dulwich==0.9.4',
'gittle==0.3.0',
'pylint==1.1.0',
'pandas==0.12.0',
'jinja2==2.7.1',
'funky==0.0.2',
'lxml==3.1.2',
'scipy==0.12.1',
'matplotlib==1.3.1',
'radon==0.5.1',
'mpld3==0.2'
],
}
setup(**setup_kwargs)
| gpl-3.0 |
probcomp/bdbcontrib | tests/test_population.py | 1 | 4635 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# matplotlib needs to set the backend before anything else gets to.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from contextlib import contextmanager
from io import BytesIO
from string import ascii_lowercase # pylint: disable=deprecated-module
from textwrap import dedent
import bayeslite
import os
import pandas
import pytest
import random
import re
import sys
import tempfile
import test_plot_utils
from bayeslite.loggers import CaptureLogger
from bdbcontrib import Population, population
testvars = {'dataset': None, 'input_df': None}
import multiprocessing
import time
@contextmanager
def ensure_timeout(delay, target):
proc = multiprocessing.Process(target=target)
proc.start()
proc.join(delay)
assert not proc.is_alive()
# proc.terminate()
# proc.join()
@contextmanager
def prepare():
if testvars['dataset'] is None:
(df, csv_data) = test_plot_utils.dataset(40)
tempd = tempfile.mkdtemp(prefix="bdbcontrib-test-population")
csv_path = os.path.join(tempd, "data.csv")
with open(csv_path, "w") as csv_f:
csv_f.write(csv_data.getvalue())
bdb_path = os.path.join(tempd, "data.bdb")
name = ''.join(random.choice(ascii_lowercase) for _ in range(32))
dts = Population(name=name, csv_path=csv_path, bdb_path=bdb_path,
logger=CaptureLogger(
verbose=pytest.config.option.verbose),
session_capture_name="test_population.py")
ensure_timeout(10, lambda: dts.analyze(models=10, iterations=20))
testvars['dataset'] = dts
testvars['input_df'] = df
yield testvars['dataset'], testvars['input_df']
def test_analyze_and_analysis_status_and_reset():
with prepare() as (dts, _df):
resultdf = dts.analysis_status()
assert 'iterations' == resultdf.index.name, repr(resultdf)
assert 'count of model instances' == resultdf.columns[0], repr(resultdf)
assert 1 == len(resultdf), repr(resultdf)
assert 10 == resultdf.ix[20, 0], repr(resultdf)
resultdf = dts.analyze(models=11, iterations=1)
assert 'iterations' == resultdf.index.name, repr(resultdf)
assert 'count of model instances' == resultdf.columns[0], repr(resultdf)
dts.logger.result(str(resultdf))
assert 2 == len(resultdf), repr(resultdf)
assert 10 == resultdf.ix[21, 0], repr(resultdf)
assert 1 == resultdf.ix[1, 0], repr(resultdf)
dts.reset()
resultdf = dts.analysis_status()
assert 'iterations' == resultdf.index.name, repr(resultdf)
assert 'count of model instances' == resultdf.columns[0], repr(resultdf)
assert 0 == len(resultdf), repr(resultdf)
ensure_timeout(10, lambda: dts.analyze(models=10, iterations=20))
resultdf = dts.analysis_status()
assert 'iterations' == resultdf.index.name, repr(resultdf)
assert 'count of model instances' == resultdf.columns[0], repr(resultdf)
assert 1 == len(resultdf), repr(resultdf)
assert 10 == resultdf.ix[20, 0], repr(resultdf)
# This is the only test that needs the files anymore (for reset),
# so now that we're done, clean those up. The rest of the tests can
# happen in any order based on the in-memory bdb.
import shutil
shutil.rmtree(os.path.dirname(dts.csv_path))
def test_q():
with prepare() as (dts, df):
resultdf = dts.query('SELECT COUNT(*) FROM %t;')
#resultdf.to_csv(sys.stderr, header=True)
assert 1 == len(resultdf)
assert 1 == len(resultdf.columns)
assert '"COUNT"(*)' == resultdf.columns[0]
assert len(df) == resultdf.iloc[0, 0]
resultdf = dts.query(dedent('''\
ESTIMATE DEPENDENCE PROBABILITY OF
floats_1 WITH categorical_1 BY %g'''))
#resultdf.to_csv(sys.stderr, header=True)
| apache-2.0 |
anurag313/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running
an SVC (support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse-of-dimensionality setting
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/experimental/tests/test_enable_hist_gradient_boosting.py | 16 | 1641 | """Tests for making sure experimental imports work as expected."""
import textwrap
from sklearn.utils._testing import assert_run_python_script
def test_imports_strategies():
# Make sure different import strategies work or fail as expected.
# Since Python caches the imported modules, we need to run a child process
# for every test case. Else, the tests would not be independent
# (manually removing the imports from the cache (sys.modules) is not
# recommended and can lead to many complications).
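    # Illustration of the caching behaviour (hypothetical snippet, not one of
    # the strategies tested below): within a single process,
    #   import sklearn.ensemble   # executes the module body once
    #   import sklearn.ensemble   # no-op, resolved from the sys.modules cache
    # which is why each import strategy is exercised in its own child process
    # through assert_run_python_script.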
good_import = """
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
"""
assert_run_python_script(textwrap.dedent(good_import))
good_import_with_ensemble_first = """
import sklearn.ensemble
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
"""
assert_run_python_script(textwrap.dedent(good_import_with_ensemble_first))
bad_imports = """
import pytest
with pytest.raises(ImportError):
from sklearn.ensemble import HistGradientBoostingClassifier
with pytest.raises(ImportError):
from sklearn.ensemble._hist_gradient_boosting import (
HistGradientBoostingClassifier)
import sklearn.experimental
with pytest.raises(ImportError):
from sklearn.ensemble import HistGradientBoostingClassifier
"""
assert_run_python_script(textwrap.dedent(bad_imports))
| bsd-3-clause |
alonsopg/AuthorProfiling | src/develop_pca_baseline.py | 1 | 7883 | #!/usr/bin/env python
# -*- coding: utf-8
from __future__ import print_function
# Import required libraries
import cPickle as pickle
import scipy
import numpy as np
import argparse
import os
from sklearn.metrics.metrics import accuracy_score
from sklearn.cross_validation import KFold
# Configuration variables
NAME='develop'
if __name__ == "__main__":
    # Command-line options
p = argparse.ArgumentParser(NAME)
p.add_argument("DIR",default=None,
action="store", help="Directory with corpus")
p.add_argument("-m", "--mode",type=str,
action="store", dest="mode",default="gender",
help="Mode (gender|age|extroverted|stable|agreeable|conscientious|open) [gender]")
p.add_argument("-f", "--folds",type=int,
action="store", dest="folds",default=20,
help="Folds during cross validation [20]")
p.add_argument("-d", "--dir",
action="store_true", dest="dir",default="feats",
help="Default directory for features [feats]")
p.add_argument("-v", "--verbose",
action="store_true", dest="verbose",
help="Verbose mode [Off]")
p.add_argument("--estimators",
action="store", dest="estimators",default=10000,type=int,
help="Define el valor para n_estimators")
opts = p.parse_args()
    # Prepare the verbose function
if opts.verbose:
def verbose(*args):
print(*args)
else:
verbose = lambda *a: None
feats=['1grams','tfidf','lb_reyes','lb_hu','lf_reyes','lf_hu','whissell_t']
if opts.mode=="gender":
index_y=0
elif opts.mode=="age":
index_y=1
elif opts.mode.startswith("ex"):
index_y=2
elif opts.mode.startswith("st"):
index_y=3
elif opts.mode.startswith("agre"):
index_y=4
elif opts.mode.startswith("co"):
index_y=5
elif opts.mode.startswith("op"):
index_y=6
    # Load labels
truth={}
for line in open(os.path.join(opts.DIR,'truth.txt')):
bits=line.split(':::')
truth[bits[0]]=bits[1:]
    # Load the matrices
x=[]
feats_=[]
for feat in feats:
verbose('Loading:', feat)
        # Read the row indices
try:
with open(os.path.join(opts.dir,feat+'.idx'),'rb') as idxf:
ids = pickle.load(idxf)
except IOError:
verbose('Warning, no features...')
continue
        # Read the feature matrix from disk
with open(os.path.join(opts.dir,feat+'.dat'), 'rb') as infile:
x_ = pickle.load(infile)
if type(x_) is scipy.sparse.csr.csr_matrix:
x_ = x_.toarray()
x.append(x_)
feats_.append(feat)
verbose("Loaded",len(x),"matrix features")
for feat,x_ in zip(feats_,x):
        verbose('Summary', feat)
verbose("Rows :", x_.shape[0] )
verbose("Features :", x_.shape[1] )
verbose('----------\n')
x=np.hstack(x)
    # Check that labels and identifiers match
    if not x.shape[0]==len(ids):
        print("Error: feature matrix rows {0} do not match identifiers {1}".
            format(x.shape[0], len(ids)))
verbose("Truth :", len(truth) )
verbose("Ids :", len(ids) )
verbose("Rows :", x.shape[0] )
verbose("Features :", x.shape[1] )
verbose('----------\n')
    # Retrieving the labels
try:
y_labels= [truth[id_usuario][index_y] for idd,id_usuario in ids]
except ValueError:
y_labels= [truth[id_usuario][index_y] for id_usuario in ids]
    # Converting labels to numbers
if opts.mode in ['age','gender']:
labels={}
for label in y_labels:
try:
labels[label]+=1
except KeyError:
labels[label]=1
labels=labels.keys()
for label in labels:
verbose("Label",label,"-->",labels.index(label))
verbose('----------\n')
        # Creating the label vector
y=np.array([ labels.index(label) for label in y_labels])
else:
y=np.array([float(l) for l in y_labels])
print(y)
    # Reduce X_train with PCA
    # Apply the same reduction to X_test
kf = KFold(len(y), n_folds=opts.folds)
y_=[]
prediction_=[]
verbose("Cross validation:")
for i,(train,test) in enumerate(kf):
        # Splitting data into training and test
        from sklearn.decomposition import PCA
        pca = PCA(n_components='mle')
        X_train, X_test, y_train, y_test = x[train],x[test],y[train],y[test]
        # Fit the PCA reduction on the training split only, then apply it to the test split
        X_train = pca.fit_transform(X_train)
        X_test = pca.transform(X_test)
if opts.mode in ['age','gender']:
            # Preparing the learning machine
verbose(" Training fold (%i)"%(i+1))
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
classifier=RandomForestClassifier(n_estimators=10000, criterion='entropy')
#classifier = SVC(C=10, kernel='linear',
#gamma=10, coef0=0.0, shrinking=True,
#probability=False, tol=0.001, cache_size=20000,
#class_weight='auto', verbose=False, max_iter=-1,
#random_state=None)
            # Learning
classifier.fit(X_train, y_train)
            # Predicting
verbose(" Predicting fold (%i)"%(i+1))
prediction = classifier.predict(X_test)
verbose(' Accuracy fold (%i):'%(i+1), accuracy_score(y_test, prediction))
y_.extend(y_test)
prediction_.extend(prediction)
else:
            # Preparing the learning machine
verbose(" Regressing fold (%i)"%(i+1))
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
#regressor=RandomForestRegressor(n_estimators=opts.estimators)
regressor = SVR(kernel='linear', degree=3, gamma=1.0, coef0=1.0,
tol=0.001, C=10, epsilon=0.1, shrinking=True, probability=False
, cache_size=200, verbose=False, max_iter=-1,
random_state=None)
            # Learning
regressor.fit(X_train, y_train)
            # Predicting
verbose(" Predicting fold (%i)"%(i+1))
prediction = regressor.predict(X_test)
y_.extend(y_test)
prediction_.extend(prediction)
verbose('----------\n')
verbose("Evaluation")
if opts.mode in ['age','gender']:
from sklearn.metrics.metrics import precision_score, recall_score, confusion_matrix, classification_report, accuracy_score, f1_score
        # Computing performance
print( 'Accuracy :', accuracy_score(y_, prediction_))
print( 'Precision :', precision_score(y_, prediction_))
print( 'Recall :', recall_score(y_, prediction_))
print( 'F-score :', f1_score(y_, prediction_))
print( '\nClasification report:\n', classification_report(y_,
prediction_))
print( '\nConfussion matrix :\n',confusion_matrix(y_, prediction_))
else:
from sklearn.metrics.metrics import mean_absolute_error, mean_squared_error,r2_score
print( 'Mean Abs Error :', mean_absolute_error(y_, prediction_))
print( 'Mean Sqr Error :', mean_squared_error(y_, prediction_))
print( 'R2 Error :', r2_score(y_, prediction_))
#plots:
#import matplotlib.pyplot as plt
#confusion_matrix_plot = confusion_matrix(y_test, prediction)
#plt.title('matriz de confusion')
#plt.colorbar()
#plt.xlabel()
#plt.xlabel('categoria de verdad')
#plt.ylabel('categoria predecida')
#plt.show()
| gpl-2.0 |
garciparedes/python-examples | numerical/utils/csv_to_parquet.py | 1 | 1282 | """ From: 'https://stackoverflow.com/a/45618618/3921457' """
import logging
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from numerical.utils.constants import (
CSV_FILE_PATH,
PARQUET_FILE_PATH,
CHUNK_SIZE,
)
logger = logging.getLogger(__name__)
def csv_to_parquet():
logger.info(f'Starting...')
stream = pd.read_csv(
CSV_FILE_PATH,
chunksize=CHUNK_SIZE,
low_memory=False,
sep=',',
encoding='latin-1',
)
logger.info(f'CSV Stored Size: {CSV_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')
chunk = next(stream)
logger.debug(f'Processing 1-th chunk...')
parquet_schema = pa.Table.from_pandas(chunk).schema
    parquet_writer = pq.ParquetWriter(PARQUET_FILE_PATH, parquet_schema, compression='snappy')
    # Write the first chunk as well; it was read above only to infer the schema.
    parquet_writer.write_table(pa.Table.from_pandas(chunk, parquet_schema))
for i, chunk in enumerate(stream, 2):
logger.debug(f'Processing {i}-th chunk...')
table = pa.Table.from_pandas(chunk, parquet_schema)
parquet_writer.write_table(table)
parquet_writer.close()
logger.info(f'Parquet Stored Size: {PARQUET_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')
logger.info(f'Finished!')
def main():
logging.basicConfig(level=logging.INFO)
csv_to_parquet()
if __name__ == '__main__':
main()
| mpl-2.0 |
brchiu/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
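Example invocation (illustrative; the --notebook_dir and --password flags are
defined at the bottom of this file):
  python notebook.py --notebook_dir=/tmp/notebooks --password=secret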
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
  # password functionality adapted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
dibondar/PyPhotonicReagents | libs/gui/iterate_file.py | 1 | 6770 | ########################################################################
#
# The abstract base class for iterating over a data set in the HDF5 file.
# This class can be used, .e.g., to view scans
#
########################################################################
import wx
import h5py
import ntpath
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.pyplot import cm
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
########################################################################
class IterateFile (wx.Frame) :
"""
The abstract base class for iterating over a data set in the HDF5 file.
	This class can be used, e.g., to view scans.
To use this abstract class the method <self.UpdateFrame(event=None)> must be defined.
This method is called when the current frame is changed.
The optional method <self.IniLoad (hdf5_file)> will be called after the HDF5 file was
successfully opened to load desired data.
	The first step in <self.UpdateFrame> should be a call to <self.GetCurrentFrame>.
	A minimal example subclass is sketched at the end of this module.
"""
def __init__ (self, groupename, parent=None, filename = None, title=None) :
"""
<filename> the HDF5 file name which will be iterated over
		<groupename> the HDF5 group name where the iterable data sets are stored.
		The data sets over which iteration is performed must be labeled by integers.
"""
# If file name is not specified, then ask user what file should be open
if filename == None :
openFileDialog = wx.FileDialog (parent, "Chose HDF5 file for viewing", "", "", \
"HDF5 files (*.hdf5)|*.hdf5", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL :
raise ValueError ("User did not chose the file to view")
else : filename = openFileDialog.GetPath()
# Open the HDF5 file
self.data_file = h5py.File (filename, 'r')
# Loading other data, if desired
try : self.IniLoad (self.data_file)
except AttributeError : pass
# Saving the group over which the iteration happens
self.iteration_group = self.data_file[groupename]
		# Sorting the data set names in the iterable group numerically
self.frames = sorted( self.iteration_group.keys(), key=int )
# Construct GUI
if title == None :
title = "Viewing file [%s]" % ntpath.basename(filename)
wx.Frame.__init__ (self, parent, title=title)
self.ConstructGUI ()
self.Show ()
self.Maximize (True)
self.UpdateFrame ()
def ConstructGUI (self) :
"""
Create GUI
"""
panel = wx.Panel(self)
sizer = wx.BoxSizer (wx.VERTICAL)
################################ Animation control button ###################################
boxsizer = wx.BoxSizer (wx.HORIZONTAL)
# Go to initial frame button
self.initial_frame_button = wx.Button (panel, label="<<")
def GoToInitialFrame (event) :
self.current_frame.SetValue (0); self.UpdateFrame ()
self.Bind (wx.EVT_BUTTON, GoToInitialFrame, self.initial_frame_button)
boxsizer.Add(self.initial_frame_button, flag=wx.LEFT, border=5)
# Go to the previous frame
self.previous_frame_button = wx.Button (panel, label="<")
def GoToPreviousFrame (event) :
current_value = self.current_frame.GetValue()
if current_value > 0 :
self.current_frame.SetValue(current_value-1); self.UpdateFrame ()
self.Bind (wx.EVT_BUTTON, GoToPreviousFrame, self.previous_frame_button)
boxsizer.Add(self.previous_frame_button, flag=wx.LEFT, border=5)
# Variable storing current frame number
self.current_frame = wx.SpinCtrl (panel, value="0", min=0, max=len(self.frames)-1)
self.current_frame.Bind (wx.EVT_SPINCTRL, self.UpdateFrame)
boxsizer.Add(self.current_frame, flag=wx.LEFT, border=5)
# Animation button
self.animation_button = wx.Button (panel)
self.animation_button.__start_label = "Start animation"
self.animation_button.__stop_label = "STOP animation"
self.animation_button.SetLabel (self.animation_button.__start_label)
self.Bind (wx.EVT_BUTTON, self.OnAnimation, self.animation_button)
boxsizer.Add(self.animation_button, flag=wx.LEFT, border=5)
# Go to the next frame button
self.next_frame_button = wx.Button (panel, label=">")
def GoToNextFrame (event) :
current_value = self.current_frame.GetValue()
if current_value < len(self.frames)-1 :
self.current_frame.SetValue(current_value+1); self.UpdateFrame ()
self.Bind (wx.EVT_BUTTON, GoToNextFrame, self.next_frame_button)
boxsizer.Add(self.next_frame_button, flag=wx.LEFT, border=5)
# Go to the last frame button
self.final_frame_button = wx.Button (panel, label=">>")
def GoToLastFrame (event) :
self.current_frame.SetValue (len(self.frames)-1); self.UpdateFrame ()
self.Bind (wx.EVT_BUTTON, GoToLastFrame, self.final_frame_button)
boxsizer.Add(self.final_frame_button, flag=wx.LEFT, border=5)
sizer.Add(boxsizer, flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=5)
################################ Canvas for plotting ##############################
# Matplotlib canvas
boxsizer = wx.BoxSizer (wx.VERTICAL)
self.dpi = 80
display_width, display_hight = wx.DisplaySize()
self.fig = Figure((0.49*display_width/self.dpi, 0.8*display_hight/self.dpi), dpi=self.dpi)
self.canvas = FigCanvas (panel, -1, self.fig)
boxsizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
boxsizer.Add(NavigationToolbar(self.canvas), 0, wx.EXPAND)
sizer.Add(boxsizer, flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=5)
#########################################################################################
panel.SetSizer (sizer)
def GetCurrentFrame (self) :
"""
Load the data set
"""
return self.iteration_group[ self.frames[self.current_frame.GetValue()] ]
def OnAnimation (self, event=None) :
"""
<self.animation_button> was clicked
"""
if self.animation_button.GetLabel() == self.animation_button.__start_label :
def DoAnimation () :
wx.Yield()
current_value = self.current_frame.GetValue()
if current_value < len(self.frames)-1 :
# Continue animation
self.current_frame.SetValue(current_value+1)
self.UpdateFrame()
# Decide whether to continue animation
if self.animation_button.GetLabel() == self.animation_button.__stop_label :
wx.CallAfter(DoAnimation)
else :
# Stop animation
self.OnAnimation()
# Initiate animation
wx.CallAfter(DoAnimation)
self.animation_button.SetLabel( self.animation_button.__stop_label )
else :
# Stop animation
self.animation_button.SetLabel( self.animation_button.__start_label )
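########################################################################
#
#	Minimal usage sketch: a concrete viewer obtained by subclassing
#	IterateFile, as described in the class docstring: define UpdateFrame,
#	call GetCurrentFrame first, then draw on the canvas created by
#	ConstructGUI. The group name "scans" and the assumption that each
#	frame is a numeric data set are hypothetical examples.
#
########################################################################
class ExampleScanViewer (IterateFile) :
	def UpdateFrame (self, event=None) :
		# Read the currently selected frame into a numpy array
		frame = self.GetCurrentFrame()[...]
		# Redraw the canvas
		self.fig.clear()
		axes = self.fig.add_subplot(111)
		if frame.ndim == 2 :
			axes.imshow(frame, cmap=cm.gray)
		else :
			axes.plot(frame)
		self.canvas.draw()
if __name__ == '__main__' :
	app = wx.App()
	ExampleScanViewer(groupename="scans")
	app.MainLoop()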
| bsd-3-clause |
bsipocz/statsmodels | statsmodels/datasets/tests/test_utils.py | 26 | 1697 | import os
import sys
from statsmodels.datasets import get_rdataset, webuse, check_internet
from numpy.testing import assert_, assert_array_equal, dec
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_get_rdataset():
# smoke test
if sys.version_info[0] >= 3:
#NOTE: there's no way to test both since the cached files were
#created with Python 2.x, they're strings, but Python 3 expects
#bytes and the index file path is hard-coded so both can't live
#side by side
pass
#duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir)
else:
duncan = get_rdataset("Duncan", "car", cache=cur_dir)
assert_(duncan.from_cache)
#internet_available = check_internet()
#@dec.skipif(not internet_available)
def t_est_webuse():
# test copied and adjusted from iolib/tests/test_foreign
from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2
#base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
base_gh = "http://statsmodels.sourceforge.net/devel/_static/"
res1 = webuse('macrodata', baseurl=base_gh, as_df=False)
assert_array_equal(res1 == res2, True)
#@dec.skipif(not internet_available)
def t_est_webuse_pandas():
# test copied and adjusted from iolib/tests/test_foreign
from pandas.util.testing import assert_frame_equal
from statsmodels.datasets import macrodata
dta = macrodata.load_pandas().data
base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/"
res1 = webuse('macrodata', baseurl=base_gh)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
| bsd-3-clause |
qifeigit/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
lcharleux/argiope | argiope/tests/mesh/sets.py | 1 | 1535 | import argiope as ag
import numpy as np
import pandas as pd
ELEMENTS = ag.mesh.ELEMENTS
mesh = ag.mesh.read_msh("demo.msh")
tag = "SURFACE"
nodes, elements = mesh.nodes, mesh.elements
loc = elements.conn[elements.sets.SURFACE[mesh._null]].stack().stack().unique()
loc = loc[loc != 0]
nodes.loc[loc, ("sets", tag) ] = True
"""
loc = mesh.elements.loc[:, ("sets", tag, self._null)].as_matrix().flatten()
nlabels = np.unique(self.elements.conn.as_matrix()[loc].flatten())
self.nodes[("sets", tag)] = False
self.nodes.loc[nlabels, ("sets", tag)] = False
"""
"""
coords = mesh.nodes.coords.copy()
node_map = pd.Series(data = np.arange(len(coords)), index = coords.index)
conn = node_map.loc[conn.values.flatten()].values.reshape(*conn.shape)
"""
"""
nodes, elements = self.nodes, self.elements
#NODES
nodes_map = np.arange(nodes.index.max()+1)
nodes_map[nodes.index] = np.arange(len(nodes.index))
nodes_map[0] = -1
coords = nodes.coords.as_matrix()
#ELEMENTS
connectivities = elements.conn.as_matrix()
connectivities[np.isnan(connectivities)] = 0
connectivities = connectivities.astype(np.int32)
connectivities = nodes_map[connectivities]
labels = np.array(elements.index)
etype = np.array(elements.type.argiope.iloc[:,0])
#FACES
verts = []
for i in range(len(etype)):
face = connectivities[i][argiope.mesh.ELEMENTS[etype[i]]["faces"]]
vert = np.array([coords[n] for n in face])
verts.append(vert[:,:2])
verts = np.array(verts)
patches = collections.PolyCollection(verts, *args,**kwargs )
return patches
"""
| gpl-3.0 |
xwolf12/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
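# Illustrative usage sketch for the NNDSVD helper above (demo values only, not
# part of the public module API): seed a small factorization with the 'a'
# variant, which fills zero entries with the mean of X.
def _example_initialize_nmf():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))  # any non-negative (n_samples, n_features) array
    W, H = _initialize_nmf(X, n_components=2, variant='a', random_state=0)
    # W is (6, 2), H is (2, 5), both non-negative, with X roughly np.dot(W, H)
    return W, H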
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
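# Illustrative usage sketch for the projected-gradient NLS solver above (demo
# shapes and values only): solve min ||W H - V||_2 over H with W held fixed.
def _example_nls_subproblem():
    rng = np.random.RandomState(0)
    V = np.abs(rng.randn(8, 4))   # constant target matrix
    W = np.abs(rng.randn(8, 3))   # constant factor
    H0 = np.abs(rng.randn(3, 4))  # initial guess for the free factor
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    return H, n_iter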
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
DevinZ1993/Code-Cupboard | java/Sort/run.py | 4 | 1415 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pylab as pl
import os
import subprocess
def plot_txt(path,plot):
fin = open(path,'r')
pl.title(plot)
col_names = fin.readline().split()
pl.xlabel("Input Size")
pl.ylabel("Time Costs")
mat = [[] for name in col_names]
line = fin.readline()
while line:
row = line.split()
try:
for i,v in enumerate(row):
assert(v is not None)
mat[i].append(v)
except Exception,e:
print "Failed to plot line `"+line[:-1],
print "` due to "+str(e)
line = fin.readline()
fin.close()
curves = []
for idx,val in enumerate(mat[1:]):
curves.append(pl.plot(mat[0],val,label=col_names[idx]))
pl.legend()
pl.ylim(ymin=-500)
pl.show()
if __name__=='__main__':
root_path = os.path.dirname(os.path.realpath(__file__))
bin_path = os.path.join(root_path, "bin")
if not os.path.exists(os.path.join(root_path, "data/")):
os.mkdir(os.path.join(root_path, "data"))
txt_path = os.path.join(root_path, "data/time.txt")
print "This may take some time. Wait a minute ... "
    # subprocess.call blocks until the java process has finished writing the data file.
subprocess.call("java -classpath "+bin_path+" CountSort > "+txt_path, shell=True)
plot_txt(txt_path, "Time Performance of Integer Sorting")
| mpl-2.0 |
jllanfranchi/pygeneric | plotGoodies.py | 1 | 21446 | from __future__ import absolute_import, division, print_function
import colorsys
from fractions import Fraction
import numpy as np
import pylab
from scipy.interpolate import interp1d  # used by onpick_peakfind below; was missing
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from smartFormat import smartFormat, TeX
__all__ = [
"MARKERS",
"DARK_BLUE",
"DARK_RED",
"LIGHT_BLUE",
"LIGHT_RED",
"colorCycleOrthog",
"invertColor",
"hsvaFact",
"colorCycleRainbow",
"human_safe",
"my_rainbow",
"grayify_cmap",
"show_colormap",
"plotDefaults",
"generateColorCycle",
"rugplot",
"onpick_peakfind",
"onpickLegend_toggle",
"complexPlot",
"plotMatrix",
"removeBorder",
"findRenderer",
"ScaledMaxNLocator",
"logticks_format",
"smartticks_format",
"fmtstr_format",
"fractticks_format",
"maskZeros",
]
MARKERS = [
".",
"v",
"o",
"*",
"+",
"D",
"^",
"s",
"p",
"x",
"<",
">",
"h",
"H",
"d",
"|",
"_",
]
DARK_BLUE = (0.0, 0.0, 0.7)
DARK_RED = (0.7, 0.0, 0.0)
LIGHT_BLUE = (0.4, 0.4, 0.8)
LIGHT_RED = (0.8, 0.4, 0.4)
"""
Use the following as:
#mpl.rc('axes', color_cycle=colorCycleOrthog)
source: http://stackoverflow.com/questions/470690/how-to-automatically-generate-n-distinct-colors
... but modified somewhat from that!
"""
colorCycleOrthog = (
"#000000", # 0 Black
"#803E75", # 2 Strong Purple
"#FF6800", # 3 Vivid Orange
"#8A9DD7", # 4 Very Light Blue
"#FFB300", # 1 Vivid Yellow
"#C10020", # 5 Vivid Red
"#CEA262", # 6 Grayish Yellow
"#817066", # 7 Medium Gray
# The following will not be good for people with defective color vision
"#007D34", # 8 Vivid Green
"#F6768E", # 9 Strong Purplish Pink
"#00538A", # 10 Strong Blue
"#93AA00", # 11 Vivid Yellowish Green
"#593315", # 12 Deep Yellowish Brown
"#F14AD3", # 13 PINK/Magenta! (used to be: #F13A13, Vivid Reddish Orange
"#53377A", # 14 Strong Violet
"#FF8E00", # 15 Vivid Orange Yellow
"#54BF00", # 16 Vivid Greenish Yellow
"#0000A5", # 17 BLUE!
"#7F180D", # 18 Strong Reddish Brown
#'#F13A13', # 13 Vivid Reddish Orange
#'#B32851', # 16 Strong Purplish Red
#'#FF7A5C', # 19 Strong Yellowish Pink
)
def invertColor(c):
r, g, b, a = mpl.colors.colorConverter.to_rgba(c)
if len(c) == 3:
return (1 - r, 1 - g, 1 - b)
return (1 - r, 1 - g, 1 - b, a)
# if isinstance(c, basestring):
# c = c.replace('#', '')
# r, g, b = (int(c[2*i:2*i+2], 16) for i in range(3))
# ri = 255-r
# gi = 255-g
# bi = 255-b
# return '#%02x%02x%02x'%(ri, gi, bi)
def hsvaFact(c, hf=1.0, sf=1.0, vf=1.0, af=1.0, clip=True):
r, g, b, a = mpl.colors.colorConverter.to_rgba(c)
h, s, v = colorsys.rgb_to_hsv(r, g, b)
ri, gi, bi = colorsys.hsv_to_rgb(h * hf, s * sf, v * vf)
if clip:
# Clip all values to range [0, 1]
result = (
np.clip(ri, 0, 1),
np.clip(gi, 0, 1),
np.clip(bi, 0, 1),
np.clip(a * af, 0, 1),
)
else:
# Rescale to fit largest within [0, 1]; if all of r, g, b fit in this
# range, do nothing
maxval = max(ri, gi, bi)
# Scale colors if one exceeds range
if maxval > 1:
ri /= maxval
gi /= maxval
bi /= maxval
# Clip alpha to range [0, 1]
alpha = np.clip(a * af, a_min=0, a_max=1)
result = (ri, gi, bi, alpha)
return result
colorCycleRainbow = (
"#FF1008",
"#FF5C2F",
"#FFA055",
"#DED579",
"#ACF59A",
"#7AFFB7",
"#48F1D0",
"#17CBE4",
"#1C93F3",
"#4E4DFC",
"#8000FF",
)
human_safe = ListedColormap(colorCycleOrthog, name="human_safe")
my_rainbow = ListedColormap(colorCycleRainbow, name="my_rainbow")
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap
From: https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
if isinstance(cmap, LinearSegmentedColormap):
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
elif isinstance(cmap, ListedColormap):
return ListedColormap(colors=colors, name=cmap.name + "_grayscale")
def show_colormap(cmap):
"""From: https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/"""
im = np.outer(np.ones(100), np.arange(1000))
fig, ax = plt.subplots(2, figsize=(6, 1.5), subplot_kw=dict(xticks=[], yticks=[]))
fig.subplots_adjust(hspace=0.1)
ax[0].imshow(im, cmap=cmap)
ax[1].imshow(im, cmap=grayify_cmap(cmap))
def plotColorCycle(color_cycle=colorCycleOrthog):
N = len(color_cycle)
x = np.linspace(0, 2 * np.pi, 100)
f = plt.figure(333)
plt.clf()
ax = f.add_subplot(111)
for n in range(N):
ax.plot(
x,
np.cos(x - 2 * np.pi / N * n),
lw=3,
label=format(n, "2d") + ": " + color_cycle[n][1:],
color=color_cycle[n],
)
plt.legend(loc="center right")
ax.set_xlim([0, 8.2])
ax.set_ylim([-1.1, 1.1])
plt.tight_layout()
def plotDefaults():
plt.ion()
mpl.rc("font", **{"family": "serif", "weight": "normal", "size": 16})
mpl.rc("axes", color_cycle=human_safe.colors)
# generateColorCycle(n_colors=6)
def generateColorCycle(cmap=mpl.cm.brg, n_colors=8, set_it=True):
cmap_indices = np.array(
np.round(np.arange(0, n_colors) * (cmap.N - 1) / (n_colors - 1)), dtype=int
)
color_cycle = [
"#%0.2X%0.2X%0.2X" % tuple(np.round(c[0:3] * 255)) for c in cmap(cmap_indices)
]
if set_it:
mpl.rc("axes", color_cycle=color_cycle)
return color_cycle
def rugplot(a, y0, dy, ax, **kwargs):
return ax.plot([a, a], [y0, y0 + dy], **kwargs)
def onpick_peakfind(event):
"""Use this by:
>> fig = figure(1)
    >> ax = fig.add_subplot(111)
>> line, = ax.plot(x, y, picker=5)
>> fig.canvas.mpl_connect('pick_event', onpick_peakfind)
"""
print(event, event.canvas)
thisline = event.artist
vis = thisline.get_visible()
# -- This function doesn't handle the lines in the legend
# fig = event.canvas.figure
# leg = fig.
# print leg.__dict__
# for child in leg.get_children():
# print "child:", child
# if thisline in leg.get_lines():
# return
# -- If the line has been made invisible, ignore it (return from function)
if not vis:
return
c = thisline.get_color()
ls = thisline.get_linestyle()
lw = thisline.get_linewidth()
mk = thisline.get_marker()
mkec = thisline.get_markeredgecolor()
mkew = thisline.get_markeredgewidth()
mkfc = thisline.get_markerfacecolor()
mksz = thisline.get_markersize()
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
label = thisline.get_label()
freqrangeind = event.ind
# print 'onpick points:', zip(xdata[ind], ydata[ind])
# print freqrangeind
# print ""
# print ydata[freqrangeind]
minvalind = np.argmin(ydata[freqrangeind])
maxvalind = np.argmax(ydata[freqrangeind])
minval = ydata[freqrangeind][minvalind]
minx = xdata[freqrangeind][minvalind]
maxval = ydata[freqrangeind][maxvalind]
maxx = xdata[freqrangeind][maxvalind]
print("")
print(label)
print("min:", minval, "at", minx, "max:", maxval, "at", maxx)
halfInd = -1
maxInd = -1
try:
maxInd = pylab.find(ydata[freqrangeind] == maxval)
maxInd = maxInd[0]
# print maxInd
maxInd = freqrangeind[0] + maxInd
halfPower = maxval - 10 * np.log10(2)
# quarterind = find(ydata[freqrangeind] < maxval-10*np.log10(4))
halfInd = pylab.find(ydata < halfPower)
inddiff = halfInd - maxInd
upperInd = min(halfInd[pylab.find(inddiff > 0)])
lowerInd = max(halfInd[pylab.find(inddiff < 0)])
# print lowerInd, maxInd, upperInd
yLower = ydata[lowerInd : maxInd + 1]
xLower = xdata[lowerInd : maxInd + 1]
dyLower = max(yLower) - min(yLower)
yUpper = ydata[maxInd : upperInd + 1]
xUpper = xdata[maxInd : upperInd + 1]
dyUpper = max(yUpper) - min(yUpper)
plt.figure(999)
plt.clf()
# print ls, lw, mk, mkfc, mksz
# print l
# print l.get_markerfacecolor()
# print l.get_color()
# l.set_color(c)
# l.set_linestyle(ls)
# l.set_linewidth(lw)
# l.set_marker(mk)
# l.set_markeredgecolor(mkec)
# l.set_markeredgewidth(mkew)
# l.set_markerfacecolor(mkfc)
# l.set_markersize(mksz)
peakPlotTitle = plt.title(label, fontsize=14)
interpKind = "linear"
interpLower = interp1d(yLower, xLower, kind=interpKind)
interpUpper = interp1d(np.flipud(yUpper), np.flipud(xUpper), kind=interpKind)
lowerHalfPowerFreq = interpLower(halfPower)
upperHalfPowerFreq = interpUpper(halfPower)
iyLower = np.arange(min(yLower), max(yLower), dyLower / 40)
ixLower = interpLower(iyLower)
iyUpper = np.arange(max(yUpper), min(yUpper), -dyUpper / 40)
ixUpper = interpUpper(iyUpper)
delta_f = upperHalfPowerFreq - lowerHalfPowerFreq
f0 = xdata[maxInd]
Q = f0 / delta_f
print(
"f0:",
f0,
"delta_f:",
delta_f,
"pkval:",
ydata[maxInd],
"Q:",
Q,
"eta:",
1 / Q,
)
plt.plot(
np.concatenate((ixLower, ixUpper)),
np.concatenate((iyLower, iyUpper)),
"b.-",
alpha=0.2,
linewidth=8,
)
plt.plot(
[lowerHalfPowerFreq, upperHalfPowerFreq],
[halfPower] * 2,
"c-",
linewidth=15,
alpha=0.25,
)
l, = plt.plot(
np.concatenate((xLower, xUpper)),
np.concatenate((yLower, yUpper)),
color=c,
linestyle=ls,
linewidth=3,
marker=mk,
markerfacecolor=mkfc,
markersize=mksz,
markeredgewidth=mkew,
markeredgecolor=mkec,
)
pylab.text(
(lowerHalfPowerFreq + upperHalfPowerFreq) / 2,
halfPower,
"FWHM = "
+ lowPrec(delta_f)
+ ", Q = "
+ lowPrec(Q)
+ r", $\eta$ = "
+ lowPrec(1 / Q),
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
plt.draw()
except:
pass
# raise()
# print "failed to find/fit peak", halfInd, maxInd
def onpickLegend_toggle(event):
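    # NOTE: this callback assumes two names defined in the calling script (not
    # in this module): `lined`, a dict mapping legend proxy lines to the original
    # plotted lines, and `f4`, the figure to redraw -- as in the standard
    # matplotlib legend-picking recipe. The bare try/except below silently
    # swallows the NameError if they are missing.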
try:
# on the pick event, find the orig line corresponding to the
# legend proxy line, and toggle the visibility
legline = event.artist
origline = lined[legline]
vis = not origline.get_visible()
origline.set_visible(vis)
# Change the alpha on the line in the legend so we can see what lines
# have been toggled
if vis:
legline.set_alpha(1.0)
else:
legline.set_alpha(0.2)
f4.canvas.draw()
except:
pass
def complexPlot(
f,
data,
plot_kwargs=None,
fig_kwargs=None,
label=None,
title=None,
xlabel=None,
magPlot=True,
phasePlot=True,
realPlot=True,
imagPlot=True,
squareMag=True,
magScale="log",
phaseScale="deg",
realScale="linear",
imagScale="linear",
freqScale="log",
unwrapPhase=False,
fignum=301,
):
nPlots = magPlot + phasePlot + realPlot + imagPlot
# plt.close(fignum)
if fig_kwargs is None:
fig = plt.figure(fignum, figsize=(7, 2.00 * nPlots))
else:
fig = plt.figure(fignum, **fig_kwargs)
if plot_kwargs is None:
plot_kwargs = [{}] * nPlots
elif isinstance(plot_kwargs, dict):
plot_kwargs = [plot_kwargs] * nPlots
# -- Stack plots directly on top of one another
# plt.subplots_adjust(hspace=0.001)
# fig.clf()
plotN = 0
axesList = []
xticklabels = []
magSq = (np.abs(data)) ** 2
if magPlot:
if squareMag:
M = magSq
ylab = r"Mag$^2$"
else:
M = np.sqrt(magSq)
ylab = r"Mag"
plotN += 1
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN)
ax.plot(f, M, label=label, **kwargs)
ax.set_ylabel(ylab)
ax.grid(b=True)
ax.set_yscale(magScale)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if phasePlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
phi = np.arctan2(np.imag(data), np.real(data))
if unwrapPhase:
phi = np.unwrap(phi) # , np.pi*(1-1/10))
if phaseScale == "deg":
phaseUnits = r"deg"
phi = phi * 180 / np.pi
else:
phaseUnits = r"rad"
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, phi, label=label, **kwargs)
ax.set_ylabel(r"Phase (" + phaseUnits + r")")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if realPlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, np.real(data), label=label, **kwargs)
ax.set_ylabel("Real")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_yscale(realScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
if imagPlot:
plotN += 1
if plotN == 1:
sharex = None
else:
sharex = axesList[0]
kwargs = plot_kwargs.pop(0)
ax = fig.add_subplot(nPlots, 1, plotN, sharex=sharex)
ax.plot(f, np.imag(data), label=label, **kwargs)
ax.set_ylabel("Imaginary")
ax.grid(b=True)
axesList.append(ax)
if plotN < nPlots:
xticklabels += ax.get_xticklabels()
ax.set_xscale(freqScale)
ax.set_yscale(imagScale)
ax.set_xlim(min(f), max(f))
if plotN == 1 and title is not None:
ax.set_title(title)
if label is not None:
ax.legend(loc="best")
ax.set_xscale(freqScale)
if xlabel is not None:
ax.set_xlabel(xlabel)
# plt.setp(xticklabels, visible=False)
# fig.tight_layout()
return fig, axesList
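# Illustrative usage sketch for complexPlot above (demo data only): plot the
# magnitude and phase of a made-up one-pole response on a log frequency axis.
def _example_complexPlot():
    f = np.logspace(0, 3, 200)            # frequency axis
    data = 1.0 / (1.0 + 1j * f / 100.0)   # simple complex response
    fig, axes = complexPlot(f, data, realPlot=False, imagPlot=False,
                            title="Demo response", xlabel="Frequency (Hz)")
    return fig, axes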
def plotMatrix(tuplesDict, labelsList):
"""From:
http://fromthepantothefire.com/matplotlib/rock_paper_scissors.py"""
# list of string labels for rows/columns and
# data in dictionary of tuples of these labels (row_label, col_label)
# Map text labels to index used on plot
# this is convenient if you want to reorganize the display order
# just update the labelsList order.
labelNameToIndex = {}
for i, lab in enumerate(labelsList):
labelNameToIndex[lab] = i
# number of rows and columns
numLabels = len(labelsList)
# create a list of data points
xyz = []
for t in tuplesDict:
x = labelNameToIndex[t[1]]
# y values are reversed so output oriented the way I
# think about matrices (0, 0) in upper left.
y = numLabels - 1 - labelNameToIndex[t[0]]
# extract value and color
(z, c) = tuplesDict[t]
xyz.append((x, y, z, c))
for x, y, z, c in xyz:
plt.scatter([x], [y], s=[z], color=c, alpha=0.8)
tickLocations = list(range(numLabels))
plt.xticks(tickLocations, labelsList, rotation=90)
# reverse the labels for y axis to match the data
plt.yticks(tickLocations, labelsList[::-1])
# set the axis 1 beyond the data so it looks good.
plt.axis([-1, numLabels, -1, numLabels])
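# Illustrative usage sketch for plotMatrix above (demo labels/sizes/colors only):
# keys of the dict are (row_label, col_label) tuples, values are (size, color).
def _example_plotMatrix():
    labels = ["rock", "paper", "scissors"]
    data = {("rock", "paper"): (200, "r"),
            ("paper", "scissors"): (400, "g"),
            ("scissors", "rock"): (800, "b")}
    plotMatrix(data, labels)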
def removeBorder(axes=None, top=False, right=False, left=True, bottom=True):
"""
Minimize chartjunk by stripping out unnecessary plot borders and axis ticks
The top/right/left/bottom keywords toggle whether the corresponding plot
border is drawn
from ChrisBeaumont,
https://github.com/cs109/content/blob/master/README.md
"""
ax = axes or plt.gca()
ax.spines["top"].set_visible(top)
ax.spines["right"].set_visible(right)
ax.spines["left"].set_visible(left)
ax.spines["bottom"].set_visible(bottom)
# turn off all ticks
ax.yaxis.set_ticks_position("none")
ax.xaxis.set_ticks_position("none")
# now re-enable visibles
if top:
ax.xaxis.tick_top()
if bottom:
ax.xaxis.tick_bottom()
if left:
ax.yaxis.tick_left()
if right:
ax.yaxis.tick_right()
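# Illustrative usage sketch for removeBorder above (demo data only): keep just
# the left and bottom spines/ticks of a simple line plot.
def _example_removeBorder():
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.arange(10) ** 2)
    removeBorder(axes=ax)
    return fig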
def findRenderer(fig):
"""From http://stackoverflow.com/questions/22667224/matplotlib-get-text-bounding-box-independent-of-backend"""
if hasattr(fig.canvas, "get_renderer"):
# Some backends, such as TkAgg, have the get_renderer method, which
# makes this easy.
renderer = fig.canvas.get_renderer()
else:
# Other backends do not have the get_renderer method, so we have a work
# around to find the renderer. Print the figure to a temporary file
# object, and then grab the renderer that was used.
# (I stole this trick from the matplotlib backend_bases.py
# print_figure() method.)
import io
fig.canvas.print_pdf(io.BytesIO())
renderer = fig._cachedRenderer
return renderer
class ScaledMaxNLocator(mpl.ticker.MaxNLocator):
def __init__(self, scale, *args, **kwargs):
super(ScaledMaxNLocator, self).__init__(**kwargs)
self.scale = scale
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
# print self.scale, vmin, vmax, [float(tl)/float(self.scale) for tl in self.tick_values(vmin*self.scale, vmax*self.scale)]
return [
float(tl) / float(self.scale)
for tl in self.tick_values(vmin * self.scale, vmax * self.scale)
]
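# Illustrative usage sketch for ScaledMaxNLocator above (demo data only): pick
# "nice" major ticks as if the x data were multiplied by `scale`, e.g. ticks
# chosen in milliseconds while the data itself is stored in seconds.
def _example_scaled_locator():
    fig, ax = plt.subplots()
    ax.plot(np.linspace(0, 0.25, 50), np.random.rand(50))
    ax.xaxis.set_major_locator(ScaledMaxNLocator(1e3, nbins=5))
    return fig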
def logticks_format(value, index):
"""
By Francesco Montesano
http://stackoverflow.com/questions/19239297/matplotlib-bad-ticks-labels-for-loglog-twin-axis
This function decompose value in base*10^{exp} and return a latex string.
If 0<=value<99: return the value as it is.
if 0.1<value<0: returns as it is rounded to the first decimal
otherwise returns $base*10^{exp}$
I've designed the function to be use with values for which the decomposition
returns integers
Use as:
import matplotlib.ticker as ticker
subs = [1., 3., 6.]
ax.xaxis.set_minor_locator(ticker.LogLocator(subs=subs))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.FuncFormatter(ticks_format))
"""
exp = np.floor(np.log10(value))
base = value / 10 ** exp
if exp == 0 or exp == 1:
return "${0:d}$".format(int(value))
if exp == -1:
return "${0:.1f}$".format(value)
else:
if base == 1:
return "$10^{{{0:d}}}$".format(int(exp))
return "${0:d}\\times10^{{{1:d}}}$".format(int(base), int(exp))
def smartticks_format(**kwargs):
sfmtargs = dict(sciThresh=[4, 4], sigFigs=3, keepAllSigFigs=False)
    if kwargs is not None:
sfmtargs.update(kwargs)
def smart_ticks_formatter(value, index):
return smartFormat(value, **sfmtargs)
return smart_ticks_formatter
def fmtstr_format(fmt):
def fixed_ticks_formatter(value, index):
return TeX(format(value, fmt))
return fixed_ticks_formatter
def fractticks_format(DENOM_LIMIT):
def fract_ticks_formatter(value, index):
f = Fraction(value).limit_denominator(DENOM_LIMIT)
if f.denominator == 1:
return r"$" + format(f.numerator, "d") + r"$"
return (
r"$" + format(f.numerator, "d") + r"/" + format(f.denominator, "d") + r"$"
)
return fract_ticks_formatter
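# Illustrative usage sketch for the formatter factories above (demo data only),
# wired in through matplotlib's FuncFormatter as in the logticks_format docstring.
def _example_tick_formatters():
    import matplotlib.ticker as ticker
    fig, ax = plt.subplots()
    ax.plot(np.linspace(0, 1, 20), np.linspace(0, 0.75, 20))
    ax.xaxis.set_major_formatter(ticker.FuncFormatter(fractticks_format(8)))
    ax.yaxis.set_major_formatter(ticker.FuncFormatter(fmtstr_format(".2f")))
    return fig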
def maskZeros(H):
return H == 0
| mit |
ashhher3/ibis | ibis/util.py | 6 | 3963 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ibis.compat as compat
def guid():
try:
from ibis.comms import uuid4_hex
return uuid4_hex()
except ImportError:
from uuid import uuid4
guid = uuid4()
return guid.hex if compat.PY3 else guid.get_hex()
def bytes_to_uint8_array(val, width=70):
"""
Formats a byte string for use as a uint8_t* literal in C/C++
"""
if len(val) == 0:
return '{}'
lines = []
line = '{' + str(ord(val[0]))
for x in val[1:]:
token = str(ord(x))
if len(line) + len(token) > width:
lines.append(line + ',')
line = token
else:
line += ',%s' % token
lines.append(line)
return '\n'.join(lines) + '}'
def unique_by_key(values, key):
id_to_table = {}
for x in values:
id_to_table[key(x)] = x
return compat.dict_values(id_to_table)
def indent(text, spaces):
block = ' ' * spaces
return '\n'.join(block + x for x in text.split('\n'))
def any_of(values, t):
for x in values:
if isinstance(x, t):
return True
return False
def all_of(values, t):
for x in values:
if not isinstance(x, t):
return False
return True
def promote_list(val):
if not isinstance(val, list):
val = [val]
return val
class IbisSet(object):
def __init__(self, keys=None):
self.keys = keys or []
@classmethod
def from_list(cls, keys):
return IbisSet(keys)
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def add(self, obj):
self.keys.append(obj)
class IbisMap(object):
def __init__(self):
self.keys = []
self.values = []
def __contains__(self, obj):
for other in self.keys:
if obj.equals(other):
return True
return False
def set(self, key, value):
self.keys.append(key)
self.values.append(value)
def get(self, key):
for k, v in zip(self.keys, self.values):
if key.equals(k):
return v
raise KeyError(key)
def is_function(v):
return isinstance(v, (types.FunctionType, types.LambdaType))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Brought over from from pandas
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
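# Illustrative usage sketch for adjoin above (demo strings only): glue two
# columns of strings with two spaces between them; shorter columns are padded.
def _example_adjoin():
    left = ['a', 'bb', 'ccc']
    right = ['x', 'yy']
    return adjoin(2, left, right)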
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.unicode_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def deprecate(f, message):
def g(*args, **kwargs):
print(message)
return f(*args, **kwargs)
return g
| apache-2.0 |
DJArmstrong/autovet | Features/Centroiding/scripts/old/set_nan.py | 4 | 1593 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 22 20:36:09 2016
@author:
Maximilian N. Guenther
Battcock Centre for Experimental Astrophysics,
Cavendish Laboratory,
JJ Thomson Avenue
Cambridge CB3 0HE
Email: [email protected]
"""
import numpy as np
import matplotlib.pyplot as plt
#::: if only 1 object is contained in dic
def set_nan(dic, key='FLUX'):
###### REMOVE BROKEN ITEMS #######
#::: nan
ind_broken = np.where( dic[key] == 0. )
if key in dic: dic[key][ind_broken] = np.nan
# dic['HJD'][ind_broken] = np.nan #this is not allowed to be set to nan!!! Otherwise the binning will be messed up!!!
if 'CCDX' in dic: dic['CCDX'][ind_broken] = np.nan
if 'CCDY' in dic: dic['CCDY'][ind_broken] = np.nan
if 'CENTDX' in dic: dic['CENTDX'][ind_broken] = np.nan
if 'CENTDY' in dic: dic['CENTDY'][ind_broken] = np.nan
return dic
#::: if multiple objects are contained in dic
def set_nan_multi(dic, key='FLUX'):
###### REMOVE BROKEN ITEMS #######
#::: nan
N_obj = dic[key].shape[0]
for obj_nr in range(N_obj):
ind_broken = np.where( dic[key][obj_nr] == 0. )
if key in dic: dic[key][obj_nr,ind_broken] = np.nan
# dic['HJD'][ind_broken] = np.nan #this is not allowed to be set to nan!!! Otherwise the binning will be messed up!!!
if 'CCDX' in dic: dic['CCDX'][obj_nr,ind_broken] = np.nan
if 'CCDY' in dic: dic['CCDY'][obj_nr,ind_broken] = np.nan
if 'CENTDX' in dic: dic['CENTDX'][obj_nr,ind_broken] = np.nan
if 'CENTDY' in dic: dic['CENTDY'][obj_nr,ind_broken] = np.nan
return dic | gpl-3.0 |
corradio/electricitymap | parsers/US_BPA.py | 1 | 3624 | #!/usr/bin/env python3
"""Parser for the Bonneville Power Administration area of the USA."""
from io import StringIO
import arrow
import logging
import pandas as pd
import requests
GENERATION_URL = 'https://transmission.bpa.gov/business/operations/Wind/baltwg.txt'
GENERATION_MAPPING = {'Wind': 'wind',
'Hydro': 'hydro',
'Fossil/Biomass': 'unknown',
'Nuclear': 'nuclear'}
def get_data(url, session=None):
"""Returns a pandas dataframe."""
s = session or requests.Session()
req = s.get(url)
df = pd.read_table(StringIO(req.text), skiprows=11)
return df
def timestamp_converter(timestamp):
"""Turns a timestamp str into an aware datetime object."""
arr_dt_naive = arrow.get(timestamp, 'MM/DD/YYYY HH:mm')
dt_aware = arr_dt_naive.replace(tzinfo='America/Los_Angeles').datetime
return dt_aware
def data_processor(df, logger):
"""
Takes a dataframe and drops all generation rows that are empty or more
than 1 day old. Turns each row into a dictionary and removes any generation
types that are unknown.
Returns a list of tuples in the form (datetime, production).
"""
df = df.dropna(thresh=2)
df.columns = df.columns.str.strip()
# 5min data for the last 24 hours.
df = df.tail(288)
df['Date/Time'] = df['Date/Time'].map(timestamp_converter)
known_keys = GENERATION_MAPPING.keys() | {'Date/Time', 'Load'}
column_headers = set(df.columns)
unknown_keys = column_headers - known_keys
for k in unknown_keys:
logger.warning('New data {} seen in US-BPA data source'.format(k),
extra={'key': 'US-BPA'})
keys_to_remove = unknown_keys | {'Load'}
processed_data = []
for index, row in df.iterrows():
production = row.to_dict()
dt = production.pop('Date/Time')
dt = dt.to_pydatetime()
mapped_production = {GENERATION_MAPPING[k]: v for k, v in production.items()
if k not in keys_to_remove}
processed_data.append((dt, mapped_production))
return processed_data
def fetch_production(zone_key='US-BPA', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given zone
Arguments:
zone_key (optional) -- used in case a parser is able to fetch multiple zones
session (optional) -- request session passed in order to re-use an existing session
Return:
A dictionary in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
if target_datetime:
raise NotImplementedError('This parser is not yet able to parse past dates')
raw_data = get_data(GENERATION_URL, session=session)
processed_data = data_processor(raw_data, logger)
data = []
for item in processed_data:
datapoint = {'zoneKey': zone_key,
'datetime': item[0],
'production': item[1],
'storage': {},
'source': 'bpa.gov'}
data.append(datapoint)
return data
if __name__ == '__main__':
print('fetch_production() ->')
print(fetch_production())
| gpl-3.0 |
DimensionalScoop/kautschuk | AP_SS16/503/python/plot.py | 1 | 1645 | import os
import numpy as np
from uncertainties.unumpy import uarray
import data
import helpers as hel
import plot_helpers as plot
import main as m
import functions as f
import matplotlib as mpl
import matplotlib.pyplot as plt
#plot.plot(range(len(m.q)),m.q, "Teilchennummer","Ladung","../plots/ladung.pdf", None)
#plot.plot(range(len(m.q_korr)),m.q_korr, "Teilchennummer","Korrigierte Ladung in C","../plots/ladung2.pdf", None)
plot.plot2(range(len(m.q_new_korr)),m.q_new_korr, "Teilchennummer","Korrigierte Ladung $q / C$","../plots/ladung2.pdf", None)
plot.plot([0,5,9,12,15,22],m.q_gone_korr, "Teilchennummer","Korrigierte Ladung $q / C$","../plots/ladung2.pdf", None)
x,y = plot.extract_error(m.q_korr)
x2,y2 = plot.extract_error(m.q)
x3,y3 = plot.extract_error(m.q_new)
x4,y4 = plot.extract_error(m.q_new_korr)
bla = range(39, 45)
print(bla)
#plt.plot([0,12,22],m.q_gone_korr, 'r.')
plt.plot(np.linspace(0,0.1,len(x4)),x4,'m.')
#plt.plot(range(len(x2)),x2, 'b.') #haha, the correction makes zero difference -.-
#plt.plot(range(len(x3)),x3, 'm.') #averaged particles
#plt.plot(bla,x3, 'm.') #averaged particles
#plt.plot(range(len(x3)),x3, 'm.')
#plt.plot(range(len(x4)),x4, 'c.')
#x_flow = np.linspace(-0.2,26,1000)
#plt.plot(x_flow, f.linearFit(x_flow, m.params[0], m.params[1]), 'm-',
# label='linearer Fit', linewidth=0.8)
for i in range(22):
c = m.tryit1.nominal_value
# c = 1.602e-19
c = 1.46e-19
# c = m.tryit2.nominal_value
plt.axhline(y=i*c, linewidth=0.2, color='g')
a = i*c + c*0.25
b = i*c - c*0.25
plt.axhspan(b, a, facecolor='g', alpha=0.1)
plt.savefig('../plots/ladung3.pdf')
| mit |
MarineLasbleis/GrowYourIC | GrowYourIC/geodyn_static.py | 1 | 2741 | #!/usr/bin/env python3
# Project : From geodynamic to Seismic observations in the Earth's inner core
# Author : Marine Lasbleis
""" Defines classes for static models, derived from geodyn classes. """
import numpy as np
import matplotlib.pyplot as plt # for figures
import math
from scipy.integrate import ode
from scipy.optimize import fsolve
# personal routines
from . import positions
from . import intersection
from . import geodyn
class Hemispheres(geodyn.Model):
""" Static hemispheres:
    proxy is just defined as -1 in the western hemisphere and +1 in the eastern one."""
def __init__(self, angletheta=0., anglephi=30.):
self.name = "Static hemispheres"
self.anglephi = anglephi
self.angletheta = angletheta
self.tau_ic = 0.
def proxy_singlepoint(self, point, proxy_type):
""" -1 in western hemisphere, +1 in the eastern hemisphere"""
proxy = {} # empty dict
proxy["hemisphere"] = np.sign(
np.sin((point.phi + self.anglephi) * np.pi / 180.))
return proxy
def velocity(self, time, point):
""" has to be defined, but set to 0 """
return [0., 0., 0.]
def radius_ic(self, t):
return self.rICB
def verification(self):
pass
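# Illustrative usage sketch for Hemispheres (assumptions flagged): the real code
# passes positions.Point-like objects; only `point.phi` (an angle in degrees) is
# read by proxy_singlepoint, so a tiny stand-in object is enough for a demo.
def _example_hemispheres():
    class _FakePoint(object):
        phi = 45.  # assumed: longitude-like angle in degrees
    model = Hemispheres(anglephi=30.)
    return model.proxy_singlepoint(_FakePoint(), "hemisphere")  # {'hemisphere': 1.0}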
class Radial_sym(geodyn.Model):
""" Simple radial symmetry (no flow)
An additional function can be added and given as a variable (fonction) to define the radial dependency.
See the class Innermost_IC for an example.
"""
def __init__(self, fonction=None):
self.name = "Radial symmetry"
self.tau_ic = 0.
if fonction is None: # TODO maybe should be something as "check if self.radial_dependency is defined, and if not, then defines it"?
def fonction(r):
return r
# has to be a function with 1 argument (radius)
self.radial_dependency = fonction
def proxy_singlepoint(self, point, proxy_type):
""" """
proxy = {} # empty dict
proxy["radius"] = self.radial_dependency(point.r)
#np.sign(np.sin((point.phi + self.anglephi) * np.pi / 180.))
return proxy
def velocity(self, time, point):
""" has to be defined, but set to 0 """
return [0., 0., 0.]
def radius_ic(self, t):
return self.rICB
def verification(self):
pass
class Innermost_IC(Radial_sym):
""" """
def __init__(self, radius_IIC):
self.radius_IIC = radius_IIC
def fonction(r):
if r > self.radius_IIC:
answer = 1.
else:
answer = 0.
return answer
Radial_sym.__init__(self, fonction)
self.name = "Innermost inner core"
| mit |
mfherbst/bohrium | bridge/npbackend/bohrium/contexts.py | 1 | 2966 | """
Bohrium Contexts
================
"""
import sys
import os
from . import backend_messaging as messaging
class EnableBohrium:
"""Enable Bohrium within the context"""
def __init__(self):
# In order to avoid complications, we import common libraries BEFORE enabling Bohrium
try:
import matplotlib
if os.environ.get("DISPLAY", "") == "":
matplotlib.use('Agg') # When no DISPLAY, we assume a headless matplotlib is used
import matplotlib.pyplot
import matplotlib.pylab
except ImportError:
pass
try:
import scipy
import scipy.sparse
import scipy.io
except ImportError:
pass
try:
import netCDF4
except ImportError:
pass
# Let's save to real NumPy module
self.__numpy = sys.modules['numpy']
self.__numpy_random = sys.modules['numpy.random']
self.__numpy_linalg = sys.modules['numpy.linalg']
# Sub-module matlib has to be imported explicitly once in order to be available through bohrium
try:
import numpy.matlib
except ImportError:
pass
def __enter__(self):
import numpy
import bohrium
# Overwrite with Bohrium
sys.modules['numpy_force'] = numpy
sys.modules['numpy'] = bohrium
sys.modules['numpy.random'] = bohrium.random
sys.modules['numpy.linalg'] = bohrium.linalg
def __exit__(self, *args):
# Put NumPy back together
sys.modules.pop('numpy_force', None)
sys.modules['numpy'] = self.__numpy
sys.modules['numpy.random'] = self.__numpy_random
sys.modules['numpy.linalg'] = self.__numpy_linalg
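# Illustrative usage sketch (comment only; names refer to the classes in this
# module): inside the block `import numpy` resolves to Bohrium, and the real
# NumPy modules are restored on exit.
#
#     with EnableBohrium():
#         import numpy as np   # Bohrium's drop-in replacement
#         a = np.ones((1000, 1000)).sum()
#
#     with Profiling():        # prints backend statistics on exit
#         ...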
class DisableBohrium:
"""Disable Bohrium within the context"""
def __enter__(self):
# Save current state
import numpy
self._numpy = sys.modules['numpy']
self._numpy_random = sys.modules['numpy.random']
self._numpy_linalg = sys.modules['numpy.linalg']
# Make sure that numpy points to numpy (and not Bohrium)
sys.modules['numpy'] = sys.modules.get("numpy_force", self._numpy)
def __exit__(self, *args):
# Load the state before entering context
sys.modules['numpy'] = self._numpy
sys.modules['numpy.random'] = self._numpy_random
sys.modules['numpy.linalg'] = self._numpy_linalg
class Profiling:
"""Profiling the Bohrium backends within the context."""
def __init__(self):
pass
def __enter__(self):
messaging.statistic_enable_and_reset()
def __exit__(self, *args):
print(messaging.statistic())
class DisableGPU:
"""Disable the GPU backend within the context."""
def __init__(self):
pass
def __enter__(self):
messaging.gpu_disable()
def __exit__(self, *args):
messaging.gpu_enable()
| lgpl-3.0 |
pyspace/test | docs/conf.py | 1 | 22175 | # -*- coding: utf-8 -*-
#
# aBRI documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 30 13:24:06 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, inspect
# Add root of the tree --> go to place before docs
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
if not root_dir in sys.path:
sys.path.append(root_dir)
import pySPACE
try:
pySPACE.load_configuration("config.yaml")
except:
pass
import pySPACE.missions.nodes
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc is an extension to extract documentation automatically
# viewcode is an extension to link the corresponding sourcecode
# as a link automatically with syntax highlighting
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
# 'numpy_ext.numpydoc',
# 'matplotlib.sphinxext.plot_directive',
# 'matplotlib.sphinxext.only_directives',
]
autosummary_generate = True
# switches the showing of todos on or of
#todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pySPACE'
copyright = u'2012, pySPACE Developer Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.5 alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build','templates','includes']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Variable for the settings of autodoc
# 'members' of the module or class used with autodoc will we shown
# So the classes don't have to be listed individually when using autodoc with a file module
# 'undoc-members' makes sure, even undocumented members are listed
# 'show-inheritance' adds a short line where you get to know what the mother class is.
# 'private-members' is only available since version 1.1 of sphinx
# Now also 'members' beginning with "_" will be included in documentation.
# 'inherited-members' would also include inherited members in the documentation generation.
# Normally they are omitted because the class doesn't change these functions.
# If you set one of these flags in this configuration value,
# you can use a negated form, 'no-flag', in an autodoc directive, to disable it once.
# .. automodule:: foo
# :no-undoc-members:
# undoc-members','inherited-members','members','show-inheritance', 'private-members'
# Python “special” members (that is, those named like __special__) will be included if the special-members flag option is given
autodoc_default_flags = ['members','show-inheritance','undoc-members','private-members', 'special-members']
# 'private-members' is only available since version 1.1 of sphinx
# Now also 'members' beginning with "_" will be included in documentation.
# # Activate this parameter to say where its documentation comes from
# # The default 'should be' to concatenate the doc-strings of the class and its
# # __init__ function.
# autoclass_content = 'class' #'both', 'class', 'init'
autoclass_content = 'class' #'both'
# # This value selects if automatically documented members
# # are sorted alphabetical (value 'alphabetical'),
# # by member type (value 'groupwise') or by source order (value 'bysource').
autodoc_member_order = 'bysource'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_theme = "sphinxdoc"
#html_theme_options = {
# "rightsidebar": "false",
# "relbarbgcolor": "black"
#}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'pySPACE.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "graphics/pyspace-logo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "graphics/pyspace-logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'docs'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'pySPACE.tex', ur'pySPACE Documentation',
ur'pySPACE Developer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "graphics/pyspace-logo_small.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def fix_sig(app, what, name, obj, options, signature,
return_annotation):
""" Underline class name and separate it from parameters
**Deprecated**
"""
if 'class' == what:
# underline class name manually
new_signature="\n"
new_signature+="-"*(len(str(name))+7) # 'class ' is not included in length
new_signature+="\n"
if signature:
# delete beginning and ending brackets
parameters = signature #[1:-1]
parameters = parameters.replace(", **kwargs","")
parameters = parameters.replace(", *args","")
if len(parameters)>0:
                # add the parameters separately
# unfortunately this is done in bold for unknown reasons
# (probably the first newline is the reason)
# extra dot is added for extra blank line
new_signature+=".\n"
new_signature+="Parameters:"
new_signature+="\n\n"
# the parameters should be indented but this doesn't work
new_signature+=" "
new_signature+=parameters
return (new_signature, return_annotation)
else:
return (signature, return_annotation)
def missing_docstring(app, what, name, obj, options, lines):
""" Construct a list of components having no docsting
.. todo:: Discover where the 'type ERROR' comes from in CCS
"""
if len(lines)==0 and not str(name).endswith("__init__"):
        with open(os.path.join(os.path.dirname(__file__),".build","html","undocumented.txt"),"a") as f:
            f.write(str(name)+"\n")
else:
for line in lines:
if "document" in line and "todo" in line:
                with open(os.path.join(os.path.dirname(__file__),".build","html","undocumented.txt"),"a") as f:
                    f.write("\n"+str(name)+"\n"+line+"\n \n")
if 'class' == what and str(name).endswith("Node"):
# e.g. pySPACE.missions.nodes.spatial_filtering.spatial_filtering.SpatialFilteringNode
lines.append("")
lines.append("")
lines.append(":POSSIBLE NODE NAMES:")
lines.append("")
for key, value in pySPACE.missions.nodes.NODE_MAPPING.items():
if value.__module__+"."+value.__name__==name:
lines.append(" - **"+key+"**")
lines.append("")
lines.append("")
# Add class summary
# For unknown reasons, this part produces warnings and errors
# referring to except and types, but the reason is unclear
if 'class' == what and not len(lines)==0 and not "Metaclass" in name and \
not name.endswith("SklearnNode"):
new_lines=[]
new_lines.append("")
new_lines.append("**Class Components Summary**")
new_lines.append("")
new_lines.append(".. autosummary::")
new_lines.append("")
method_list = inspect.getmembers(obj) #,inspect.ismethod
for method,value in method_list:
if not method in ["__doc__","__module__","__metaclass__","__dict__","__init__","__weakref__"] and method in obj.__dict__.keys():
new_lines.append(" "+method)
# if "type" in obj.__name__ or "type" in method or "except" in new_lines[-1]:
# print obj
# print name
# print method
# print
# only one method found
if len(new_lines)<=5:
new_lines=[]
lines.extend(new_lines)
lines.append("")
lines.append("")
def setup(app):
""" Activate fix_sig and missing_docstring and delete old 'undocumented.txt'
.. todo:: Fix file handling. Only works with 'make html_complete'
"""
# app.connect('autodoc-process-signature', fix_sig)
app.connect('autodoc-process-docstring', missing_docstring)
# clean up auto-un-documentation files
fname=os.path.join(os.path.dirname(__file__),".build","html","undocumented.txt")
if os.access(fname,os.F_OK):
os.remove(fname)
######################### preparation #########################################
# delete old list of nodes
fname=os.path.join(os.path.dirname(__file__),"nodes.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
location = "pySPACE.missions.nodes"
offset = len(location)+1
node_list=[]
for key,value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
######################### header ###############################################
f=open(fname,"a")
f.write(".. AUTO-GENERATED FILE -- DO NOT EDIT! (conf.py)\n")
f.write(".. _node_list: \n")
f.write("\n")
f.write("List of all Nodes \n")
f.write("======================= \n")
f.write("\n")
f.write("pySPACE comes along with a big choice of processing nodes.\n")
f.write("They can be accessed via :class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.\n")
f.write("In the following you can get an overview on their functionality, \n")
f.write("the mapping from node names in specification files to the node class \n")
f.write("and vice versa.\n")
f.write("\n")
f.write("For details on the usage of the nodes and for getting usage examples, "
"have a look at their documentation.\n")
######################### node summary #########################################
f.write("\n")
f.write("Mapping of Class Names to Functionality \n")
f.write("--------------------------------------- \n")
f.write("\n")
#f.write("\n.. currentmodule:: %s\n\n"%location)
#f.write(".. autosummary:: \n")
#f.write(" :nosignatures:\n")
#f.write(" :toctree: nodes\n")
f.write("\n")
current_location=""
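# Walk the alphabetically sorted node list; whenever the first sub-package level
# below pySPACE.missions.nodes changes, a new sub-section with its own
# autosummary table is started for that node folder.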
for node in node_list:
if not node == "pySPACE.missions.nodes.base_node.BaseNode" and \
not "template" in node:
new_location=node[offset:].split(".")[0]
if not new_location==current_location:
current_module=location+"."+new_location
f.write("\n")
f.write("%s\n"%new_location)
f.write("+"*(len(new_location))+"\n")
f.write(" \n|\n\n")
f.write(".. currentmodule:: %s\n"%location)
f.write(".. autosummary:: \n\n %s\n\n|\n\n"%current_module[offset:])
# if not current_module=="pySPACE.missions.nodes.splitter":
# f.write(".. automodule:: %s\n :no-members:\n\n"%current_module)
# else:
# f.write("Control how data is split into training and testing data\n\n")
f.write(".. currentmodule:: %s\n"%current_module)
f.write(".. autosummary:: \n")
f.write(" :nosignatures:\n")
f.write("\n")
current_location=new_location
current_offset = len(current_module)+1
f.write(" "+node[current_offset:]+"\n")
f.write("\n")
######################### node name --> class name ############################
node_name_dict = pySPACE.missions.nodes.NODE_MAPPING
name_list = [(name,value.__module__[offset:]+"."+value.__name__) for name,value in node_name_dict.items()]
f.write(".. currentmodule:: %s\n\n"%location)
f.write("Mapping of Node Names to Class Names \n")
f.write("------------------------------------ \n")
f.write("\n")
name_list.sort(key=lambda x: x[0].lower())
for name,class_name in name_list:
f.write(" - "+name+": "+":class:`"+class_name+"`"+"\n")
######################### class name --> node name ############################
f.write("\n")
f.write("Mapping of Class Names to Node Names \n")
f.write("------------------------------------ \n")
f.write("\n")
name_list.sort(key=lambda x: (x[1].lower(),x[0]))
for name,class_name in name_list:
f.write(" - "+":class:`"+class_name+"`"+": "+name+"\n")
f.close()
######################### operation example list #############################
#examples operations
fname=os.path.join(os.path.dirname(__file__),"examples","operations.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
specs_path=os.path.join(os.path.dirname(__file__),"examples","specs")
examples=os.path.join(specs_path,"operations","examples")
f=open(fname,"a")
f.write(".. _operation_examples: \n")
f.write("\n")
f.write("Operation Examples \n")
f.write("=========================== \n")
f.write("\n")
f.write("These are examples of yaml files you can use as a template\n")
f.write("for your own operations. For details on operations have a look at the respective documentation.\n")
f.write("\n")
# adding example files
for folder, _, files in os.walk(examples):
for fname in files:
f.write(fname + "\n")
f.write("------------------------------------------\n")
f.write("\n")
f.write(".. literalinclude:: " + os.path.join("specs","operations","examples",fname) + "\n")
f.write("\t" + ":language: yaml" + "\n")
f.write("\n")
f.close()
######################### operation chain example list ########################
#examples operation_chains
examples=os.path.join(specs_path,"operation_chains","examples")
fname=os.path.join(os.path.dirname(__file__),"examples","operation_chains.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
f=open(fname,"a")
f.write(".. _operation_chain_examples: \n")
f.write("\n")
f.write("Operation Chain Examples \n")
f.write("============================ \n")
f.write("\n")
f.write("These are examples of yaml files you can use as a template\n")
f.write("for your own operation chains. For details on operation chains have a look at the respective documentation.\n")
f.write("\n")
# adding example files
for folder, _, files in os.walk(examples):
for fname in files:
f.write(fname + "\n")
f.write("------------------------------------------\n")
f.write("\n")
f.write(".. literalinclude:: " + os.path.join("specs","operation_chains","examples",fname) + "\n")
f.write("\t" + ":language: yaml" + "\n")
f.write("\n")
f.close()
######################### preparation of external node documentation ##########
# delete old list of nodes
fname=os.path.join(os.path.dirname(__file__),"external_nodes.rst")
if os.access(fname,os.F_OK):
os.remove(fname)
location = "pySPACE.missions.nodes"
offset = len(location)+1
f=open(fname,"a")
######################### header ###############################################
f.write(".. AUTO-GENERATED FILE -- DO NOT EDIT! (conf.py)\n")
f.write(".. _external_nodes: \n")
f.write("\n")
f.write("Documentation of External and Wrapped Nodes \n")
f.write("=========================================== \n")
f.write("\n")
f.write("pySPACE comes along with wrappers to external algorithms.\n")
f.write("\n")
f.write("For details on the usage of the nodes and for getting usage examples, \n"
"have a look at their documentation.\n")
node_list = []
for key, value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
if value.__module__ == "pySPACE.missions.nodes.external":
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
if len(node_list) > 0:
f.write("\n")
f.write(".. _external_folder: \n")
f.write("\n")
f.write("External Nodes \n")
f.write("-------------- \n")
f.write("\n")
f.write("Nodes from :mod:`external folder <pySPACE.missions.nodes.external>`\n\n")
cl = ""
for node in node_list:
cl += "\n:class:`" + node + "`\n"
cl += "~"*(len(node)+9)+"\n\n"
cl += ".. autoclass:: %s\n" % node
cl += " :noindex:\n\n"
f.write(cl)
else:
f.write("Module for external node wrapping: :mod:`pySPACE.missions.nodes.external`\n")
######################### scikit nodes #########################################
node_list = []
for key, value in pySPACE.missions.nodes.DEFAULT_NODE_MAPPING.items():
if value.__name__.endswith("SklearnNode"):
node_list.append(value.__module__+"."+value.__name__)
node_list.sort()
if len(node_list) > 0:
f.write("\n")
f.write(".. _scikit_nodes: \n")
f.write("\n")
f.write("Scikit Nodes \n")
f.write("------------ \n")
f.write("\n")
f.write("Nodes from :mod:`scikits wrapper <pySPACE.missions.nodes.scikits_nodes>`\n\n")
cl = ""
for node in node_list:
cl += "\n:class:`" + node + "`\n"
cl += "~"*(len(node)+9)+"\n\n"
cl += ".. autoclass:: %s\n :no-members:\n\n" % node
f.write(cl)
f.close()
inheritance_graph_attrs = dict(rankdir="TB",fontsize=5,ratio='compress',nodesep=0.1,sep=0.1, pad=0.001,size= '"10.0, 25.0"') #, size='""'
graphviz_output_format = 'png' #'svg' svg is good for scaling but linking seems to work only with png
#inheritance_node_attrs = dict(shape='rectangle', fontsize=8, height=0.7,
# color='grey', style='filled')
inheritance_node_attrs = dict(shape='rectangle', fontsize=10, height=0.02,width=0.02,margin=0.005)
| gpl-3.0 |
kamorozov/DDparser | Bin_graph.py | 1 | 4807 | import matplotlib.pyplot as plt
def x_eq(y, mixture):
yx = eval(equations_yx.get(mixture))
return yx
def y_eq(x, mixture):
xy = eval(equations_xy.get(mixture))
return xy
def save_graph(mix, x_1, x_1d, F):
x_1w = 1 - x_1d
D_work = F * (x_1 - x_1w)/(x_1d - x_1w)
f = F / D_work
R_min = (x_1d - y_eq(x_1, mix)) / (y_eq(x_1, mix) - x_1)
R = R_min * 1.3
y = x_1d
x = x_1d
x_work = [x]
y_work = [y]
N = 0
x_pr = 0
x_last = 0
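    # McCabe-Thiele staircase: starting from the distillate composition x_1d,
    # alternate between the equilibrium curve (x_eq/y_eq) and the operating
    # lines until the bottoms composition x_1w is reached; N counts the
    # theoretical stages, and N = -1 flags a diverging staircase (x stopped
    # decreasing), i.e. no feasible solution for the chosen compositions.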
while x > x_1w:
x = x_eq(y, mix)
x_work.append(x)
y_work.append(y_eq(x, mix))
x_last = x
if round(x_last, 5) > round(x_pr, 5):
N = -1
break
else:
if round(x_last, 5) == round(x_pr, 5):
break
if x > x_1:
y_work_up = R / (R + 1) * x + x_1d / (R + 1)
y = y_work_up
x_work.append(x)
y_work.append(y)
elif x < x_1:
y_work_down = (R + f) / (R + 1) * x - (1 - f) / (R + 1) * x_1w
y = y_work_down
x_work.append(x)
y_work.append(y)
x_pr = x_last
N +=1
plt.plot(x_work, y_work)
x = []
y =[]
for i in range(0, 101):
x.append(i / 100)
y_i = y_eq(i / 100, mix)
y.append(y_i)
plt.plot([0, 1], [0, 1])
plt.plot(x, y)
x_work_line = []
y_work_line = []
for j in range(int(x_1w * 1000), int(x_1d * 1000 + 1)):
x_work_line.append(j / 1000)
if j / 1000 <= x_1:
y_work_down = (R + f) / (R + 1) * (j / 1000) - (1 - f) / (R + 1) * x_1w
y_work_line.append(y_work_down)
elif j / 1000 >= x_1:
y_work_up = R / (R + 1) * (j / 1000) + x_1d / (R + 1)
y_work_line.append(y_work_up)
plt.plot(x_work_line, y_work_line)
plt.axis([0, 1, 0, 1])
eq_file = open('equation.txt', 'r')
equations_yx = {}
equations_xy = {}
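# equation.txt is expected to hold tab-separated records: mixture name, direction
# ('yx' or 'xy') and a polynomial fit that uses ',' as the decimal separator.
# For a hypothetical line "acetone-water<TAB>xy<TAB>0,57x2 + 0,43x" the loop below
# rewrites the fit into a Python expression such as "0.57 * x ** 2 + 0.43 * x".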
for temp_line in eq_file:
if temp_line[0] != '#':
line = temp_line.split('\t')
line[2] = line[2][:-1]
eq = line[2].split(',')
eq = '.'.join(eq)
for i in range(2, 10):
if eq.find('x' + str(i)) > -1:
eq = eq[:eq.find('x' + str(i))] + ' * x ** ' + str(i) + eq[eq.find('x' + str(i)) + 2:]
if eq.find('x +') > -1:
eq = eq[:eq.find('x +')] + ' * x' + eq[eq.find('x +') + 1:]
elif eq.find('x -') > -1:
eq = eq[:eq.find('x -')] + ' * x' + eq[eq.find('x -') + 1:]
elif eq[-1] == 'x':
            # strings are immutable, so rebuild the expression instead of item assignment
            eq = eq[:-1] + ' * x'
if line[1] == 'yx':
while eq.find('x') > -1:
eq = eq[:eq.find('x')] + 'y' + eq[eq.find('x') + 1:]
line[2] = eq
if line[1] == 'yx':
equations_yx[line[0]] = line[2]
else:
equations_xy.update({line[0]: line[2]})
print('Available systems:')
for mix in equations_xy:
print(mix, end=' ')
print()
mix = input('System: ')
x_1 = float(input('Feed composition (mole fraction of component 1): '))
x_1d = float(input('Distillate composition (mole fraction of component 1): '))
F = float(input('Feed flow rate: '))
x_1w = 1 - x_1d
D_work = F * (x_1 - x_1w)/(x_1d - x_1w)
f = F / D_work
R_min = (x_1d - y_eq(x_1, mix)) / (y_eq(x_1, mix) - x_1)
R = R_min * 1.3
y = x_1d
x = x_1d
x_work = [x]
y_work = [y]
N = 0
x_pr = 0
x_last = 0
while x > x_1w:
x = x_eq(y, mix)
x_work.append(x)
y_work.append(y_eq(x, mix))
x_last = x
if round(x_last, 5) > round(x_pr, 5):
N = -1
break
else:
if round(x_last, 5) == round(x_pr, 5):
break
if x > x_1:
y_work_up = R / (R + 1) * x + x_1d / (R + 1)
y = y_work_up
x_work.append(x)
y_work.append(y)
elif x < x_1:
y_work_down = (R + f) / (R + 1) * x - (1 - f) / (R + 1) * x_1w
y = y_work_down
x_work.append(x)
y_work.append(y)
x_pr = x_last
N +=1
plt.plot(x_work, y_work)
x = []
y =[]
for i in range(0, 101):
x.append(i / 100)
y_i = y_eq(i / 100, mix)
y.append(y_i)
plt.plot([0, 1], [0, 1])
plt.plot(x, y)
x_work_line = []
y_work_line = []
for j in range(int(x_1w * 1000), int(x_1d * 1000 + 1)):
x_work_line.append(j / 1000)
if j / 1000 <= x_1:
y_work_down = (R + f) / (R + 1) * (j / 1000) - (1 - f) / (R + 1) * x_1w
y_work_line.append(y_work_down)
elif j / 1000 >= x_1:
y_work_up = R / (R + 1) * (j / 1000) + x_1d / (R + 1)
y_work_line.append(y_work_up)
plt.plot(x_work_line, y_work_line)
plt.axis([0, 1, 0, 1])
plt.show()
| mit |
cangermueller/biseq | biseq/fwrv_to_txt.py | 1 | 2967 | import argparse
import pandas as pd
import numpy as np
import logging
import os
class FwrvToTxt(object):
def __init__(self):
pass
def main(self, args):
self.name = os.path.basename(args[0])
p = argparse.ArgumentParser('Split Babraham forward/reverse counts files by samples',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('fw_file', help='Forward counts file', metavar='fw-file')
p.add_argument('rv_file', help='Reverse counts file', metavar='rv-file')
p.add_argument('-o', '--out-dir', help='Output directory', default='.')
p.add_argument('--samples', help='Indices of samples', nargs='+')
p.add_argument('--prefix', help='Prefix of output files', default='s')
p.add_argument('--nrows', help='Only read that number of rows', type=int)
p.add_argument('--no-drop', help='Do not drop zero counts sites', dest='drop', default=True, action='store_false')
p.add_argument('--verbose', help='More detailed log messages', default=False, action='store_true')
p.add_argument('--log-file', help='Write log messages to file')
opts = p.parse_args(args[1:])
logging.basicConfig(filename=opts.log_file, format='%(levelname)s (%(asctime)s): %(message)s')
log = logging.getLogger(__name__)
if opts.verbose:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
if opts.verbose:
log.debug('Command line arguments:')
log.debug(str(opts))
log.info('Reading positions ...')
pos = pd.read_table(opts.fw_file, usecols=['Chromosome', 'Start', 'End'],
dtype=dict(Chromosome=np.str, Start=np.int, End=np.int),
nrows=opts.nrows)
pos.columns = ['chromo', 'start', 'end']
columns = pd.read_table(opts.fw_file, nrows=1).columns
if not opts.samples:
opts.samples = range(list(columns == 'Distance').index(True) + 1, len(columns))
log.info('Reading forward counts ...')
fw = pd.read_table(opts.fw_file, usecols=opts.samples, nrows=opts.nrows).astype(np.int16)
log.info('Reading reverse counts ...')
rv = pd.read_table(opts.rv_file, usecols=opts.samples, nrows=opts.nrows).astype(np.int16)
log.info('Splitting counts by samples ...')
sample_names = fw.columns
for sample_name in sample_names:
log.info('\t' + sample_name)
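            # nmet is taken from the forward counts, ntot is forward + reverse (total coverage)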
counts = pd.concat((pos, fw[sample_name], fw[sample_name] + rv[sample_name]), axis=1)
counts.columns = list(counts.columns[:-2]) + ['nmet','ntot']
if opts.drop:
counts = counts[(counts.ntot > 0)]
filename = os.path.join(opts.out_dir, '%s%s.txt' % (opts.prefix, sample_name))
counts.to_csv(filename, sep='\t', index=False)
log.info('Done!')
| gpl-3.0 |
waterponey/scikit-learn | sklearn/linear_model/bayes.py | 50 | 16145 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_offset, y_offset, X_scale)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
    Fit the weights of a regression model, using an ARD prior. The weights of
    the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset, y_offset, X_scale)
return self
| bsd-3-clause |
zorojean/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: its calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
xubenben/scikit-learn | sklearn/utils/tests/test_extmath.py | 130 | 16270 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with approximate effective rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
# Check that transposing the design matrix has limit impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
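    # For reference, the batch update under test combines an old block (n1, m1, v1)
    # with a new block (n2, m2, v2) in the standard parallel-variance form
    # (stated here as an aid, not copied from the implementation):
    #     m = (n1*m1 + n2*m2) / (n1 + n2)
    #     M2 = n1*v1 + n2*v2 + n1*n2/(n1 + n2) * (m1 - m2)**2,  v = M2 / (n1 + n2)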
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/__init__.py | 59 | 3038 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
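    # Illustrative usage: export SKLEARN_SEED=42 (or any integer) before launching
    # the test suite to make this fixture use a fixed seed instead of a random one.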
| bsd-3-clause |
ericdill/miniature-hipster | miniature-hipster/plotting/waterfall.py | 1 | 4362 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# imports
from __future__ import unicode_literals
import os
from collections import deque
# matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
# scipy/numpy
import numpy as np
from scipy.signal import savgol_filter
from scipy.interpolate import UnivariateSpline
# pandas
import pandas as pd
from pandas import DataFrame, Series, Index
from pprint import pprint
################################################################################
__author__ = 'edill'
#### DATA FOLDER DIRECTORY #####################################################
folder = 'C:\\DATA\\New folder\\Data_4_Eric\\SCAN_Xr0'
folder = 'C:\\DATA\\New folder\\Data_4_Eric\\SCAL_alongX'  # overrides the path above
# todo plot multiple waterfalls on the same figure
#### PLOTTING OPTIONS ##########################################################
x_label = '2-theta (degrees)'
y_label = 'Depth (um)'
z_label = 'Intensity (arb.)'
min_x = None
max_x = None
start_frame = None
end_frame = None
min_z = 0
max_z = None
alpha = 1
smooth = False
smooth_window_length = 91  # must be an odd integer (savgol_filter requirement)
smooth_poly_order = 7      # must be smaller than smooth_window_length
space_between_frames = 1 # y-spacing for each line
# color = cm.datad['winter']
# print(color)
color = 'hot_r'
cstart = 0.3
cstop = 0.7
frame_offset = 0
frame_delta = 0.2
print_color_options = True
if print_color_options:
print(list(cm.datad))
#### RUNTIME BEHAVIOR ##########################################################
# init some parameters
norm = cm.colors.Normalize(vmin=0, vmax=1)
files = os.listdir(folder)
# do in-place sorting of files
files.sort()
pprint(files)
data = DataFrame()
smoothed = DataFrame()
# init the defaults
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = len(files)
for fname in files[start_frame:end_frame]:
with open(folder + os.sep + fname, 'r') as f:
# data.append(np.asarray([line.split() for line in f.next()]).T)
x, y = np.asarray([[float(val) for val in line.split()] for line in f]).T
        val = Series(y, index=x)
        data[fname] = val
# init the defaults
if min_x is None:
min_x = float(data.index[0])
if max_x is None:
max_x = float(data.index[-1])
# apply x-axis filtering
data = data[data.index > min_x]
data = data[data.index < max_x]
indices = data.index
if smooth:
# smooth the data into a new data frame
for d in data:
# smooth data
vals = data[d].values
y = savgol_filter(vals, smooth_window_length, smooth_poly_order, mode='nearest').tolist()
smoothed[d] = Series(y, indices)
to_plot = smoothed
else:
# data.ix[0] = 0
# data.ix[-1] = 0
to_plot = data
to_plot.ix[0] = 0
to_plot.ix[-1] = 0
# to_plot.index.insert(0, 0)
# to_plot.index.insert(len(data.index), 0)
# set the min and max values for z after smoothing
if min_z is None:
min_z = np.min([np.min(data[col_name]) for col_name in data])
else:
for col_name in to_plot:
colvals = to_plot[col_name].values
colvals[colvals < min_z] = min_z
if max_z is None:
max_z = np.max([np.max(data[col_name]) for col_name in data])
else:
for col_name in to_plot:
colvals = to_plot[col_name].values
colvals[colvals > max_z] = max_z
poly_pairs = deque()
# create the color map
rgba = cm.ScalarMappable(cmap=color, norm=norm)
colors = rgba.to_rgba(x=np.linspace(cstart, cstop,
len(to_plot.columns)),
alpha=alpha)
for idx, d in enumerate(to_plot):
vals = to_plot[d]
poly_pairs.append([(x, y) for x, y in zip(indices, to_plot[d].values)])
# create the collection of polygons to plot
poly = PolyCollection(list(poly_pairs), facecolors=colors)
# init the matplotlib figures
fig = plt.figure()
ax3 = Axes3D(fig)
# set the offset of each polygon
start = frame_offset
end = start + frame_delta * len(data.columns)
zs = np.arange(start, end, frame_delta)
# add the polygons to the 3D axes
ax3.add_collection3d(poly, zs=zs, zdir='y')
ax3.set_xlabel(x_label)
ax3.set_ylabel(y_label)
ax3.set_zlabel(z_label)
ax3.set_xlim3d(min_x, max_x)
ax3.set_ylim3d(start, end)
ax3.set_zlim3d(min_z, max_z)
# show the plot
plt.show() | bsd-3-clause |
kklmn/xrt | xrt/plotter.py | 1 | 82259 | # -*- coding: utf-8 -*-
u"""
Module :mod:`plotter` provides classes describing axes and plots, as well as
containers for the accumulated arrays (histograms) for subsequent
pickling/unpickling or for global flux normalization. The module defines
several constants for default plot positions and sizes. The user may want to
modify them in the module or externally as in the xrt_logo.py example.
.. note::
Each plot has a 2D positional histogram, two 1D positional histograms and,
typically, a 1D color histogram (e.g. energy).
.. warning::
The two 1D positional histograms are not calculated from the 2D one!
In other words, the 1D histograms only respect their corresponding limits
and not the other dimension’s limits. There can be situations when the 2D
image is black because the screen is misplaced but one 1D histogram may
still show a beam distribution if in that direction the screen is
positioned correctly. This was the reason why the 1D histograms were
designed not to be directly dependent on the 2D one – this feature
facilitates the troubleshooting of misalignments. On the other hand, this
behavior may lead to confusion if a part of the 2D distribution is outside
of the visible 2D area. In such cases one or two 1D histograms may show a
wider distribution than the one visible on the 2D image. For correcting
this behavior, one can mask the beam by apertures or by selecting the
physical or optical limits of an optical element.
.. tip::
If you do not want to create plot windows (e.g. when they are too many or
when you run xrt on a remote machine) but only want to save plots, you can
use a non-interactive matplotlib backend such as Agg (for PNGs), PDF, SVG
or PS::
matplotlib.use('agg')
Importantly, this must be done at the very top of your script, right after
import matplotlib and before importing anything else.
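    A minimal sketch of the intended ordering (the 'agg' backend is just an
    example)::

        import matplotlib
        matplotlib.use('agg')   # select the backend before any other imports
        import xrt.plotter      # ... only now import xrt and the rest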
"""
from __future__ import unicode_literals
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "16 Mar 2017"
import os
import copy
import pickle
import numpy as np
import scipy as sp
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from . import runner
# from runner import runCardVals, runCardProcs
from .backends import raycing
try:
from .gui.commons import qt
hasQt = True
except ImportError:
hasQt = False
from matplotlib.figure import Figure
try: # for Python 3 compatibility:
unicode = unicode
except NameError:
# 'unicode' is undefined, must be Python 3
unicode = str
basestring = (str, bytes)
else:
# 'unicode' exists, must be Python 2
unicode = unicode
basestring = basestring
# otherwise it does not work correctly on my Ubuntu9.10 and mpl 0.99.1.1:
mpl.rcParams['axes.unicode_minus'] = False
#mpl.rcParams['text.usetex'] = True
#mpl.rcParams['font.family'] = 'serif'
#mpl.rcParams['font.serif'] = 'cm'
mpl.rcParams['axes.linewidth'] = 0.75
#mpl.rcParams['backend'] = 'Qt5agg'
#mpl.rcParams['backend'] = 'Agg'
#mpl.rcParams['xtick.major.pad'] = '5'
#mpl.rcParams['ytick.major.pad'] = '5'
import matplotlib.pyplot as plt
epsHist = 1e-100 # prevents problem with normalization of histograms
# [Sizes and positions of plots]
dpi = 100
xOrigin2d = 80 # all sizes are in pixels
yOrigin2d = 48
space2dto1d = 4
height1d = 84
xspace1dtoE1d = 112
yspace1dtoE1d = 76
heightE1dbar = 10
heightE1d = 84
xSpaceExtraWhenNoEHistogram = 42
xSpaceExtra = 22
ySpaceExtra = 28
# [Sizes and positions of texts]
xlabelpad = 4 # x-axis label to axis
ylabelpad = 4 # y-axis label to axis
xTextPos = 1.02 # 0 to 1 relative to the figure size
yTextPosNrays = 1.0
yTextPosNraysR = 1.32
yTextPosGoodrays = 0.8
yTextPosGoodraysR = 1.1
yTextPosI = 0.58
xTextPosDx = 0.5
yTextPosDx = 1.02
xTextPosDy = 1.05
yTextPosDy = 0.5
xTextPosStatus = 0.999
yTextPosStatus = 0.001
yTextPosNrays1 = 0.88
yTextPosNrays2 = 0.66
yTextPosNrays3 = 0.44
yTextPosNrays4 = 0.22
# [Bins]
defaultBins = 128
defaultPixelPerBin = 2
extraMargin = 4 # bins. Extra margins to histograms when limits are not given.
# [Axis labels]
axisLabelFontSize = 10
defaultXTitle = '$x$'
defaultXUnit = 'mm'
defaultYTitle = '$z$'
defaultYUnit = 'mm'
defaultCTitle = 'energy'
defaultCUnit = 'eV'
defaultFwhmFormatStrForXYAxes = '%.1f'
defaultFwhmFormatStrForCAxis = '%.2f'
# [Development]
colorFactor = 0.85 # 2./3 for red-to-blue
colorSaturation = 0.85
# # end of rc-file ##
def versiontuple(v):
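    """Convert a version string like '2.0.0' into a comparable tuple of ints,
    ignoring any non-digit characters in the components."""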
a = v.split(".")
return tuple(map(int, [''.join(c for c in s if c.isdigit()) for s in a]))
if hasQt:
class MyQtFigCanvas(qt.FigCanvas):
windowClosed = qt.pyqtSignal(int)
def __init__(self, figure, xrtplot):
super(MyQtFigCanvas, self).__init__(figure)
self.xrtplot = xrtplot
class XYCAxis(object):
u"""
Contains a generic record structure describing each of the 3 axes:
X, Y and Color (typ. Energy)."""
def __init__(
self, label='', unit='mm', factor=None, data='auto', limits=None,
offset=0, bins=defaultBins, ppb=defaultPixelPerBin,
density='histogram', invertAxis=False, outline=0.5,
fwhmFormatStr=defaultFwhmFormatStrForXYAxes):
u"""
*label*: str
The label of the axis without unit. This label will appear in the
axis caption and in the FWHM label.
*unit*: str
The unit of the axis which will follow the label in parentheses
and appear in the FWHM value
*factor*: float
Useful in order to match your axis units with the units of the
ray tracing backend. For instance, the shadow length unit is cm.
If you want to display the positions as mm: *factor=10*;
if you want to display energy as keV: *factor=1e-3*.
Another usage of *factor* is to bring the coordinates of the ray
tracing backend to the world coordinates. For instance, z-axis in
shadow is directed off the OE surface. If the OE is faced upside
down, z is directed downwards. In order to display it upside, set
minus to *factor*.
            If not specified, *factor* will default to a value that depends
            on *unit*; see :meth:`auto_assign_factor`.
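            For instance, to display a downward-directed z axis upside, one
            may construct the axis as (a sketch)::

                zaxis = XYCAxis(label='$z$', unit='mm', factor=-1)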
*data*: int for shadow, otherwise array-like or function object
shadow:
zero-based index of columns in the shadow binary files:
====== ====================================================
0 x
1 y
2 z
3 x'
4 y'
5 z'
6 Ex s polariz
7 Ey s polariz
8 Ez s polariz
9 lost ray flag
10 photon energy
11 ray index
12 optical path
13 phase (s polarization)
14 phase (p polarization)
15 x component of the electromagnetic vector (p polar)
16 y component of the electromagnetic vector (p polar)
17 z component of the electromagnetic vector (p polar)
18 empty
====== ====================================================
raycing:
use the following functions (in the table below) or pass your
own one. See :mod:`raycing` for more functions, e.g. for the
polarization properties. Alternatively, you may pass an array
of the length of the beam arrays.
======= ===================================================
x raycing.get_x
y raycing.get_y
z raycing.get_z
x' raycing.get_xprime
z' raycing.get_zprime
energy raycing.get_energy
======= ===================================================
If *data* = 'auto' then *label* is searched for "x", "y", "z",
"x'", "z'", "energy" and if one of them is found, *data* is
assigned to the listed above index or function. In raycing backend
the automatic assignment is additionally implemented for *label*
            containing 'degree' (for degree of polarization), 'circular' (for
            circular polarization rate), 'path', 'incid' or 'theta' (for
            incident angle), 'order' (for grating diffraction order), 's',
            'phi', 'r', 'a' or 'b' (for parametric representation of OE).
*limits*: 2-list of floats [min, max]
Axis limits. If None, the *limits* are taken as ``np.min`` and
``np.max`` for the corresponding array acquired after the 1st ray
tracing run. If *limits* == 'symmetric', the limits are forced to
be symmetric about the origin. Can also be set outside of the
constructor as, e.g.::
plot1.xaxis.limits = [-15, 15]
*offset*: float
An offset value subtracted from the axis tick labels to be
displayed separately. It is useful for the energy axis, where the
band width is most frequently much smaller than the central value.
Ignored for x and y axes.
+-----------------+--------------------+
| no offset | non-zero offset |
+=================+====================+
| |image_offset0| | |image_offset5000| |
+-----------------+--------------------+
.. |image_offset0| imagezoom:: _images/offset0.png
:scale: 50 %
.. |image_offset5000| imagezoom:: _images/offset5000.png
:scale: 50 %
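            A sketch reproducing the right image above::

                caxis = XYCAxis('energy', 'eV', offset=5000)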
*bins*: int
Number of bins in the corresponding 1D and 2D histograms.
See also *ppb* parameter.
*ppb*: int
Screen-pixel-per-bin value. The graph arrangement was optimized
for *bins* * *ppb* = 256. If your *bins* and *ppb* give a very
different product, the graphs may look ugly (disproportional)
with overlapping tick labels.
*density*: 'histogram' or 'kde'
The way the sample density is calculated: by histogram or by kde
[KDE]_.
*invertAxis*: bool
Inverts the axis direction. Useful for energy axis in energy-
dispersive images in order to match the colors of the energy
histogram with the colors of the 2D histogram.
*outline*: float within [0, 1]
Specifies the minimum brightness of the outline drawn over the
1D histogram. The maximum brightness equals 1 at the maximum of
the 1D histogram.
+--------------------+--------------------+--------------------+
| =0 | =0.5 | =1 |
+====================+====================+====================+
| |image_outline0.0| | |image_outline0.5| | |image_outline1.0| |
+--------------------+--------------------+--------------------+
.. |image_outline0.0| imagezoom:: _images/outline00.png
:scale: 50 %
.. |image_outline0.5| imagezoom:: _images/outline05.png
:scale: 50 %
.. |image_outline1.0| imagezoom:: _images/outline10.png
:scale: 50 %
*fwhmFormatStr*: str
Python format string for the FWHM value, e.g. '%.2f'. if None, the
FWHM value is not displayed.
"""
self.label = label
self.unit = unit
if self.label:
self.displayLabel = self.label
else:
self.displayLabel = ''
if self.unit:
self.displayLabel += ' (' + self.unit + ')'
self.factor = factor
self.data = data
self.limits = limits
self.offset = offset
self.offsetDisplayUnit = self.unit
self.offsetDisplayFactor = 1
self.bins = bins
self.ppb = ppb
self.pixels = bins * ppb
self.density = density
self.extraMargin = extraMargin
self.invertAxis = invertAxis
if outline < 0:
outline = 0
if outline > 1:
outline = 1
self.outline = outline
self.fwhmFormatStr = fwhmFormatStr
self.max1D = 0
self.max1D_RGB = 0
self.globalMax1D = 0
self.globalMax1D_RGB = 0
self.useCategory = False
def auto_assign_data(self, backend):
"""
Automatically assign data arrays given the axis label."""
if "energy" in self.label:
if backend == 'shadow':
self.data = 10
elif backend == 'raycing':
self.data = raycing.get_energy
elif "x'" in self.label:
if backend == 'shadow':
self.data = 3
elif backend == 'raycing':
self.data = raycing.get_xprime
elif "z'" in self.label:
if backend == 'shadow':
self.data = 5
elif backend == 'raycing':
self.data = raycing.get_zprime
elif "x" in self.label:
if backend == 'shadow':
self.data = 0
elif backend == 'raycing':
self.data = raycing.get_x
elif "y" in self.label:
if backend == 'shadow':
self.data = 1
elif backend == 'raycing':
self.data = raycing.get_y
elif "z" in self.label:
if backend == 'shadow':
self.data = 2
elif backend == 'raycing':
self.data = raycing.get_z
elif "degree" in self.label:
self.data = raycing.get_polarization_degree
elif "circular" in self.label:
self.data = raycing.get_circular_polarization_rate
elif "incid" in self.label or "theta" in self.label:
self.data = raycing.get_incidence_angle
elif "phi" in self.label:
self.data = raycing.get_phi
elif "order" in self.label:
self.data = raycing.get_order
elif "s" in self.label:
self.data = raycing.get_s
elif "path" in self.label:
self.data = raycing.get_path
elif "r" in self.label:
self.data = raycing.get_r
elif "a" in self.label:
self.data = raycing.get_a
elif "b" in self.label:
self.data = raycing.get_b
else:
raise ValueError(
'cannot auto-assign data for axis "{0}"!'.format(self.label))
def auto_assign_factor(self, backend):
"""
Automatically assign factor given the axis label."""
factor = 1.
if self.unit in ['keV', ]:
factor = 1e-3
elif self.unit in ['mrad', 'meV']:
factor = 1.0e3
elif self.unit in [u'$\mu$rad', u'µrad', u'urad']:
factor = 1.0e6
else:
if backend == 'shadow':
if self.unit in ['m', ]:
factor = 1e-2
elif self.unit in ['mm', ]:
factor = 10.
elif self.unit in [u'$\mu$m', u'µm', 'um']:
factor = 1.0e4
elif self.unit in ['nm', ]:
factor = 1.0e7
elif backend == 'raycing':
if self.unit in ['m', ]:
factor = 1e-3
elif self.unit in ['mm', ]:
factor = 1.
elif self.unit in [u'$\mu$m', u'µm', 'um']:
factor = 1.0e3
elif self.unit in ['nm', ]:
factor = 1.0e6
elif self.unit in ['pm', ]:
factor = 1.0e9
elif self.unit in ['fm', ]:
factor = 1.0e12
elif self.unit.startswith('deg'):
factor = np.degrees(1)
elif self.unit.startswith('mdeg'):
factor = np.degrees(1)*1e3
self.factor = factor
class XYCPlot(object):
u"""
Container for the accumulated histograms. Besides giving the beam
    images, this class provides useful fields like *dx*, *dy*, *dE*
(FWHM), *cx*, *cy*, *cE* (centers) and *intensity* which can be used in
scripts for producing scan-like results."""
def __init__(
self, beam=None, rayFlag=(1,), xaxis=None, yaxis=None, caxis=None,
aspect='equal', xPos=1, yPos=1, ePos=1, title='',
invertColorMap=False, negative=False,
fluxKind='total', fluxUnit='auto',
fluxFormatStr='auto', contourLevels=None, contourColors=None,
contourFmt='%.1f', contourFactor=1., saveName=None,
persistentName=None, oe=None, raycingParam=0,
beamState=None, beamC=None, useQtWidget=False):
u"""
*beam*: str
The beam to be visualized.
In raycing backend:
The key in the dictionary returned by
:func:`~xrt.backends.raycing.run.run_process()`. The values of
that dictionary are beams (instances of
:class:`~xrt.backends.raycing.sources.Beam`).
In shadow backend:
The Shadow output file (``star.NN``, `mirr.NN`` or
``screen.NNMM``). It will also appear in the window caption
unless *title* parameter overrides it.
This parameter is used for the automatic determination of the
backend in use with the corresponding meaning of the next two
parameters. If *beam* contains a dot, shadow backend is assumed.
Otherwise raycing backend is assumed.
*rayFlag*: int or tuple of ints
shadow: 0=lost rays, 1=good rays, 2=all rays.
raycing: a tuple of integer ray states: 1=good, 2=out, 3=over,
4=alive (good + out), -NN = dead at oe number NN (numbering starts
with 1).
*xaxis*, *yaxis*, *caxis*: instance of :class:`XYCAxis` or None.
If None, a default axis is created. If caxis='category' and the
backend is raycing, then the coloring is given by ray category, the
color axis histogram is not displayed and *ePos* is ignored.
.. warning::
The axes contain arrays for the accumulation of histograms. If
you create the axes outside of the plot constructor then make
sure that these are not used for another plot. Otherwise the
histograms will be overwritten!
*aspect*: str or float
Aspect ratio of the 2D histogram, = 'equal', 'auto' or numeric
value (=x/y). *aspect* =1 is the same as *aspect* ='equal'.
*xPos*, *yPos*: int
If non-zero, the corresponding 1D histograms are visible.
*ePos*: int
Flag for specifying the positioning of the color axis histogram:
+-------------------------+---------------------------------------+
| *ePos* =1: at the right | |image_ePos1| |
| (default, as usually | |
| the diffraction plane | |
| is vertical) | |
+-------------------------+---------------------------------------+
| *ePos* =2: at the top | |image_ePos2| |
| (for horizontal | |
| diffraction plane) | |
+-------------------------+---------------------------------------+
| *ePos* =0: no | |image_ePos0| |
| color axis histogram | |
+-------------------------+---------------------------------------+
.. |image_ePos1| imagezoom:: _images/ePos=1.png
:scale: 50 %
.. |image_ePos2| imagezoom:: _images/ePos=2.png
:scale: 50 %
.. |image_ePos0| imagezoom:: _images/ePos=0.png
:scale: 50 %
*title*: str
If non-empty, this string will appear in the window caption,
otherwise the *beam* will be used for this.
*invertColorMap*: bool
Inverts colors in the HSV color map; seen differently, this is a
0.5 circular shift in the color map space. This inversion is
useful in combination with *negative* in order to keep the same
energy coloring both for black and for white images.
*negative*: bool
            Useful for printing in order to save black ink.
See also *invertColorMap*.
* =False: black bknd for on-screen presentation
* =True: white bknd for paper printing
The following table demonstrates the combinations of
*invertColorMap* and *negative*:
+-------------+-------------------------+-------------------------+
| | *invertColorMap* | *invertColorMap* |
| | =False | =True |
+=============+=========================+=========================+
| *negative* | |image00| | |image10| |
| =False | | |
+-------------+-------------------------+-------------------------+
| *negative* | |image01| | |image11| |
| =True | | |
+-------------+-------------------------+-------------------------+
.. |image00| imagezoom:: _images/invertColorMap=0_negative=0.png
:scale: 50 %
.. |image01| imagezoom:: _images/invertColorMap=0_negative=1.png
:scale: 50 %
.. |image10| imagezoom:: _images/invertColorMap=1_negative=0.png
:scale: 50 %
.. |image11| imagezoom:: _images/invertColorMap=1_negative=1.png
:scale: 50 %
Note that *negative* inverts only the colors of the graphs, not
the white global background. Use a common graphical editor to
invert the whole picture after doing *negative=True*:
.. imagezoom:: _images/negative=1+fullNegative.png
:scale: 50 %
            (such a picture would look nice on a black journal cover, e.g.
on that of Journal of Synchrotron Radiation ;) )
.. _fluxKind:
*fluxKind*: str
Can begin with 's', 'p', '+-45', 'left-right', 'total', 'power',
'Es', 'Ep' and 'E'. Specifies what kind of flux to use for the
brightness of 2D and for the height of 1D histograms. If it ends
with 'log', the flux scale is logarithmic.
            If it starts with 'E', the *field amplitude* or mutual intensity
is considered, not the usual intensity, and accumulated in the 2D
histogram or in a 3D stack:
- If ends with 'xx' or 'zz', the corresponding 2D cuts of mutual
intensity are accumulated in the main 2D array (the one visible
as a 2D histogram). The plot must have equal axes.
- If ends with '4D', the complete mutual intensity is calculated
and stored in *plot.total4D* with the shape
(xaxis.bins*yaxis.bins, xaxis.bins*yaxis.bins).
.. warning::
Be cautious with the size of the mutual intensity object, it is
four-dimensional!
- If ends with 'PCA', the field images are stored in *plot.field3D*
with the shape (repeats, xaxis.bins, yaxis.bins) for further
Principal Component Analysis.
            - Without these endings, the field amplitudes are simply summed
in the 2D histogram.
*fluxUnit*: 'auto' or None
If a synchrotron source is used and *fluxUnit* is 'auto', the
flux will be displayed as 'ph/s' or 'W' (if *fluxKind* == 'power').
Otherwise the flux is a unitless number of rays times
transmittivity | reflectivity.
*fluxFormatStr*: str
Format string for representing the flux or power. You can use a
representation with powers of ten by utilizing 'p' as format
specifier, e.g. '%.2p'.
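            For instance, with '%.2p' a flux of 1.23e13 ph/s is displayed
            roughly as 1.23$\cdot$10$^{13}$ (two decimals times a power of
            ten).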
*contourLevels*: sequence
A sequence of levels on the 2D image for drawing the contours, in
[0, 1] range. If None, the contours are not drawn.
*contourColors*: sequence or color
A sequence of colors corresponding to *contourLevels*. A single
color value is applied to all the contours. If None, the colors are
automatic.
*contourFmt*: str
Python format string for contour values.
*contourFactor*: float
Is applied to the levels and is useful in combination with
*contourFmt*, e.g. *contourFmt* = r'%.1f mW/mm$^2$',
*contourFactor* = 1e3.
*saveName*: str or list of str or None
Save file name(s). The file type(s) are given by extensions:
png, ps, svg, pdf. Typically, *saveName* is set outside of the
constructor. For example::
filename = 'filt%04imum' %thick #without extension
plot1.saveName = [filename + '.pdf', filename + '.png']
.. _persistentName:
*persistentName*: str or None
File name for reading and storing the accumulated histograms and
other ancillary data. Ray tracing will resume the histogramming
from the state when the persistent file was written. If the file
does not exist yet, the histograms are initialized to zeros. The
persistent file is rewritten when ray tracing is completed and
the number of repeats > 0.
.. warning::
Be careful when you use it: if you intend to start from zeros,
make sure that this option is switched off or the pickle files
do not exist! Otherwise you do resume, not really start anew.
if *persistentName* ends with '.mat', a Matlab file is generated.
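            A sketch of typical usage (the file name is arbitrary)::

                plot1.persistentName = 'plot1.pickle'  # or e.g. 'plot1.mat'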
*oe*: instance of an optical element or None
If supplied, the rectangular or circular areas of the optical
surfaces or physical surfaces, if the optical surfaces are not
specified, will be overdrawn. Useful with raycing backend for
footprint images.
*raycingParam*: int
Used together with the *oe* parameter above for drawing footprint
envelopes. If =2, the limits of the second crystal of DCM are taken
for drawing the envelope; if =1000, all facets of a diced crystal
are displayed.
*beamState*: str
Used in raycing backend. If not None, gives another beam that
determines the state (good, lost etc.) instead of the state given
by *beam*. This may be used to visualize the *incoming* beam but
use the states of the *outgoing* beam, so that you see how the beam
upstream of the optical element will be masked by it. See the
examples for capillaries.
*beamC*: str
The same as *beamState* but refers to colors (when not of
'category' type).
"""
if not hasQt:
useQtWidget = False
if not useQtWidget:
plt.ion()
self.colorSaturation = colorSaturation
self.beam = beam # binary shadow image: star, mirr or screen
if beam is None:
self.backend = 'raycing'
elif '.' in beam:
self.backend = 'shadow'
elif ('dummy' in beam) or (beam == ''):
self.backend = 'dummy'
elif isinstance(rayFlag, (tuple, list)):
self.backend = 'raycing'
else:
self.backend = 'dummy'
self.beamState = beamState
self.beamC = beamC
self.rayFlag = rayFlag
self.fluxKind = fluxKind
self.fluxUnit = fluxUnit
if xaxis is None:
self.xaxis = XYCAxis(defaultXTitle, defaultXUnit)
else:
self.xaxis = xaxis
if yaxis is None:
self.yaxis = XYCAxis(defaultYTitle, defaultYUnit)
else:
self.yaxis = yaxis
if (caxis is None) or isinstance(caxis, basestring):
self.caxis = XYCAxis(defaultCTitle, defaultCUnit, factor=1.,)
self.caxis.fwhmFormatStr = defaultFwhmFormatStrForCAxis
if isinstance(caxis, basestring):
self.caxis.useCategory = True
ePos = 0
else:
self.caxis = caxis
if self.backend != 'dummy':
for axis in self.xaxis, self.yaxis, self.caxis:
if axis.data == 'auto':
axis.auto_assign_data(self.backend)
if axis.factor is None:
axis.auto_assign_factor(self.backend)
self.reset_bins2D()
if isinstance(aspect, (int, float)):
if aspect <= 0:
aspect = 1.
self.aspect = aspect
self.dpi = dpi
self.ePos = ePos # Position of E histogram, 1=right, 2=top, 0=none
self.negative = negative
if self.negative:
facecolor = 'w' # white
else:
facecolor = 'k' # black
# MatplotlibDeprecationWarning: The axisbg attribute was deprecated in
# version 2.0. Use facecolor instead.
kwmpl = {}
if versiontuple(mpl.__version__) >= versiontuple("2.0.0"):
kwmpl['facecolor'] = facecolor
else:
kwmpl['axisbg'] = facecolor
self.invertColorMap = invertColorMap
self.utilityInvertColorMap = False
self.fluxFormatStr = fluxFormatStr
self.saveName = saveName
self.persistentName = persistentName
self.cx, self.dx = 0, 0
self.cy, self.dy = 0, 0
self.cE, self.dE = 0, 0
xFigSize = float(xOrigin2d + self.xaxis.pixels + space2dto1d +
height1d + xSpaceExtra)
yFigSize = float(yOrigin2d + self.yaxis.pixels + space2dto1d +
height1d + ySpaceExtra)
if self.ePos == 1:
xFigSize += xspace1dtoE1d + heightE1d + heightE1dbar
elif self.ePos == 2:
yFigSize += yspace1dtoE1d + heightE1d + heightE1dbar
if self.ePos != 1:
xFigSize += xSpaceExtraWhenNoEHistogram
if useQtWidget:
self.fig = Figure(figsize=(xFigSize/dpi, yFigSize/dpi), dpi=dpi)
else:
self.fig = plt.figure(figsize=(xFigSize/dpi, yFigSize/dpi),
dpi=dpi)
self.local_size_inches = self.fig.get_size_inches()
self.fig.delaxes(self.fig.gca())
if title != '':
self.title = title
elif isinstance(beam, basestring):
self.title = beam
else:
self.title = ' '
if useQtWidget:
self.canvas = MyQtFigCanvas(figure=self.fig, xrtplot=self)
self.fig.canvas.set_window_title(self.title)
if plt.get_backend().lower() in (
x.lower() for x in mpl.rcsetup.non_interactive_bk):
xExtra = 0 # mpl backend-dependent (don't know why) pixel sizes
yExtra = 0 # mpl backend-dependent (don't know why) pixel sizes
else: # interactive backends:
if True: # runner.runCardVals.repeats > 1:
xExtra = 0
yExtra = 2
else:
xExtra = 0
yExtra = 0
frameon = True
rect2d = [xOrigin2d / xFigSize, yOrigin2d / yFigSize,
(self.xaxis.pixels-1+xExtra) / xFigSize,
(self.yaxis.pixels-1+yExtra) / yFigSize]
self.ax2dHist = self.fig.add_axes(
rect2d, aspect=aspect, xlabel=self.xaxis.displayLabel,
ylabel=self.yaxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax2dHist.xaxis.labelpad = xlabelpad
self.ax2dHist.yaxis.labelpad = ylabelpad
rect1dX = copy.deepcopy(rect2d)
rect1dX[1] = rect2d[1] + rect2d[3] + space2dto1d/yFigSize
rect1dX[3] = height1d / yFigSize
self.ax1dHistX = self.fig.add_axes(
rect1dX, sharex=self.ax2dHist, autoscale_on=False, frameon=frameon,
visible=(xPos != 0), **kwmpl)
rect1dY = copy.deepcopy(rect2d)
rect1dY[0] = rect2d[0] + rect2d[2] + space2dto1d/xFigSize
rect1dY[2] = height1d / xFigSize
self.ax1dHistY = self.fig.add_axes(
rect1dY, sharey=self.ax2dHist, autoscale_on=False, frameon=frameon,
visible=(yPos != 0), **kwmpl)
# make some labels invisible
pset = plt.setp
pset(
self.ax1dHistX.get_xticklabels() +
self.ax1dHistX.get_yticklabels() +
self.ax1dHistY.get_xticklabels() +
self.ax1dHistY.get_yticklabels(),
visible=False)
self.ax1dHistX.set_yticks([])
self.ax1dHistY.set_xticks([])
self.ax1dHistX.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter(
useOffset=False))
self.ax1dHistY.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter(
useOffset=False))
# for tick in (self.ax2dHist.xaxis.get_major_ticks() + \
# self.ax2dHist.yaxis.get_major_ticks()):
# tick.label1.set_fontsize(axisLabelFontSize)
self.ax1dHistXOffset = self.fig.text(
rect1dY[0]+rect1dY[2], 0.01, '', ha='right', va='bottom',
color='gray') # , fontweight='bold')
self.ax1dHistYOffset = self.fig.text(
0.01, rect1dX[1]+rect1dX[3], '', rotation=90, ha='left', va='top',
color='gray') # , fontweight='bold')
if self.ePos == 1: # right
rect1dE = copy.deepcopy(rect1dY)
rect1dE[0] = rect1dY[0] + rect1dY[2] + xspace1dtoE1d/xFigSize
rect1dE[2] = heightE1dbar / xFigSize
rect1dE[3] *= float(self.caxis.pixels) / self.yaxis.pixels
self.ax1dHistEbar = self.fig.add_axes(
rect1dE, ylabel=self.caxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax1dHistEbar.yaxis.labelpad = xlabelpad
self.ax1dHistEOffset = self.fig.text(
rect1dE[0], rect1dE[1]+rect1dE[3], '', ha='left', va='bottom',
color='g') # , fontweight='bold')
rect1dE[0] += rect1dE[2]
rect1dE[2] = heightE1d / xFigSize
self.ax1dHistE = self.fig.add_axes(
rect1dE, sharey=self.ax1dHistEbar, autoscale_on=False,
frameon=frameon, **kwmpl)
pset(
self.ax1dHistEbar.get_xticklabels() +
self.ax1dHistE.get_xticklabels() +
self.ax1dHistE.get_yticklabels(), visible=False)
pset(self.ax1dHistEbar, xticks=())
self.ax1dHistE.yaxis.set_major_formatter(
mpl.ticker.ScalarFormatter(useOffset=False))
if self.caxis.limits is not None:
self.ax1dHistE.set_ylim(self.caxis.limits)
self.ax1dHistE.set_xticks([])
elif self.ePos == 2: # top
rect1dE = copy.deepcopy(rect1dX)
rect1dE[1] = rect1dX[1] + rect1dX[3] + yspace1dtoE1d/yFigSize
rect1dE[3] = heightE1dbar / yFigSize
rect1dE[2] *= float(self.caxis.pixels) / self.xaxis.pixels
self.ax1dHistEbar = self.fig.add_axes(
rect1dE, xlabel=self.caxis.displayLabel, autoscale_on=False,
frameon=frameon, **kwmpl)
self.ax1dHistEbar.xaxis.labelpad = xlabelpad
self.ax1dHistEOffset = self.fig.text(
rect1dE[0]+rect1dE[2]+0.01, rect1dE[1]-0.01, '',
ha='left', va='top', color='g') # , fontweight='bold')
rect1dE[1] += rect1dE[3]
rect1dE[3] = heightE1d / yFigSize
self.ax1dHistE = self.fig.add_axes(
rect1dE, sharex=self.ax1dHistEbar, autoscale_on=False,
frameon=frameon, **kwmpl)
pset(
self.ax1dHistEbar.get_yticklabels() +
self.ax1dHistE.get_yticklabels() +
self.ax1dHistE.get_xticklabels(), visible=False)
pset(self.ax1dHistEbar, yticks=())
self.ax1dHistE.xaxis.set_major_formatter(
mpl.ticker.ScalarFormatter(useOffset=False))
if self.caxis.limits is not None:
self.ax1dHistE.set_xlim(self.caxis.limits)
self.ax1dHistE.set_yticks([])
allAxes = [self.ax1dHistX, self.ax1dHistY, self.ax2dHist]
if self.ePos != 0:
allAxes.append(self.ax1dHistE)
allAxes.append(self.ax1dHistEbar)
for ax in allAxes:
for axXY in (ax.xaxis, ax.yaxis):
for line in axXY.get_ticklines():
line.set_color('grey')
mplTxt = self.ax1dHistX.text if useQtWidget else plt.text
if self.ePos == 1:
self.textDE = mplTxt(
xTextPosDy, yTextPosDy, ' ', rotation='vertical',
transform=self.ax1dHistE.transAxes, ha='left', va='center')
elif self.ePos == 2:
self.textDE = mplTxt(
xTextPosDx, yTextPosDx, ' ',
transform=self.ax1dHistE.transAxes, ha='center', va='bottom')
self.nRaysAll = np.long(0)
self.nRaysAllRestored = np.long(-1)
self.intensity = 0.
transform = self.ax1dHistX.transAxes
self.textGoodrays = None
self.textI = None
self.power = 0.
self.flux = 0.
self.contourLevels = contourLevels
self.contourColors = contourColors
self.contourFmt = contourFmt
self.contourFactor = contourFactor
self.displayAsAbsorbedPower = False
self.textNrays = None
if self.backend == 'shadow' or self.backend == 'dummy':
self.textNrays = mplTxt(
xTextPos, yTextPosNrays, ' ', transform=transform, ha='left',
va='top')
self.nRaysNeeded = np.long(0)
if self.rayFlag != 2:
self.textGoodrays = mplTxt(
xTextPos, yTextPosGoodrays, ' ', transform=transform,
ha='left', va='top')
self.textI = mplTxt(
xTextPos, yTextPosI, ' ', transform=transform, ha='left',
va='top')
elif self.backend == 'raycing':
# =0: ignored, =1: good,
# =2: reflected outside of working area, =3: transmitted without
# intersection
            # =-NN: lost (absorbed) at OE #NN; OE numbering starts from 1!
# If NN>1000 then
# the slit with ordinal number NN-1000 is meant.
self.nRaysAlive = np.long(0)
self.nRaysGood = np.long(0)
self.nRaysOut = np.long(0)
self.nRaysOver = np.long(0)
self.nRaysDead = np.long(0)
self.nRaysAccepted = np.long(0)
self.nRaysAcceptedE = 0.
self.nRaysSeeded = np.long(0)
self.nRaysSeededI = 0.
self.textNrays = mplTxt(
xTextPos, yTextPosNraysR, ' ', transform=transform, ha='left',
va='top')
self.textGood = None
self.textOut = None
self.textOver = None
self.textAlive = None
self.textDead = None
if 1 in self.rayFlag:
self.textGood = mplTxt(
xTextPos, yTextPosNrays1, ' ', transform=transform,
ha='left', va='top')
if 2 in self.rayFlag:
self.textOut = mplTxt(
xTextPos, yTextPosNrays2, ' ', transform=transform,
ha='left', va='top')
if 3 in self.rayFlag:
self.textOver = mplTxt(
xTextPos, yTextPosNrays3, ' ', transform=transform,
ha='left', va='top')
if 4 in self.rayFlag:
self.textAlive = mplTxt(
xTextPos, yTextPosGoodraysR, ' ', transform=transform,
ha='left', va='top')
if not self.caxis.useCategory:
self.textI = mplTxt(
xTextPos, yTextPosNrays4, ' ', transform=transform,
ha='left', va='top')
else:
if (np.array(self.rayFlag) < 0).sum() > 0:
self.textDead = mplTxt(
xTextPos, yTextPosNrays4, ' ', transform=transform,
ha='left', va='top')
self.textDx = mplTxt(
xTextPosDx, yTextPosDx, ' ', transform=self.ax1dHistX.transAxes,
ha='center', va='bottom')
self.textDy = mplTxt(
xTextPosDy, yTextPosDy, ' ', rotation='vertical',
transform=self.ax1dHistY.transAxes, ha='left', va='center')
self.textStatus = mplTxt(
xTextPosStatus, yTextPosStatus, '', transform=self.fig.transFigure,
ha='right', va='bottom', fontsize=9)
self.textStatus.set_color('r')
self.ax1dHistX.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax1dHistY.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
if self.ePos != 0:
self.ax1dHistE.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax1dHistEbar.imshow(
np.zeros((2, 2, 3)), aspect='auto', interpolation='nearest',
origin='lower', figure=self.fig)
self.ax2dHist.imshow(
np.zeros((2, 2, 3)), aspect=self.aspect, interpolation='nearest',
origin='lower', figure=self.fig)
self.contours2D = None
self.oe = oe
self.oeSurfaceLabels = []
self.raycingParam = raycingParam
self.draw_footprint_area()
if self.xaxis.limits is not None:
if not isinstance(self.xaxis.limits, str):
self.ax2dHist.set_xlim(self.xaxis.limits)
self.ax1dHistX.set_xlim(self.xaxis.limits)
if self.yaxis.limits is not None:
if not isinstance(self.yaxis.limits, str):
self.ax2dHist.set_ylim(self.yaxis.limits)
self.ax1dHistY.set_ylim(self.yaxis.limits)
self.cidp = self.fig.canvas.mpl_connect(
'button_press_event', self.on_press)
if not useQtWidget:
plt.ioff()
self.fig.canvas.draw()
def reset_bins2D(self):
if self.fluxKind.startswith('E'):
dtype = np.complex128
else:
dtype = np.float64
self.total2D = np.zeros((self.yaxis.bins, self.xaxis.bins),
dtype=dtype)
self.total2D_RGB = np.zeros((self.yaxis.bins, self.xaxis.bins, 3))
self.max2D_RGB = 0
self.globalMax2D_RGB = 0
self.size2D = self.yaxis.bins * self.xaxis.bins
self.is4D = self.fluxKind.lower().endswith('4d')
if self.is4D:
self.total4D = np.zeros((self.size2D, self.size2D), dtype=dtype)
self.isPCA = self.fluxKind.lower().endswith('pca')
if self.isPCA:
self.total4D = []
for ax in [self.xaxis, self.yaxis, self.caxis]:
if isinstance(ax, XYCAxis):
ax.binEdges = np.zeros(ax.bins + 1)
ax.total1D = np.zeros(ax.bins)
ax.total1D_RGB = np.zeros((ax.bins, 3))
def update_user_elements(self):
return # 'user message'
def clean_user_elements(self):
pass
def on_press(self, event):
"""
Defines the right button click event for stopping the loop.
"""
if event.button == 3:
runner.runCardVals.stop_event.set()
self.textStatus.set_text("stopping ...")
def timer_callback(self, evt=None):
"""
This code will be executed on every timer tick. We have to start
:meth:`runner.dispatch_jobs` here as otherwise we cannot force the
redrawing.
"""
if self.areProcessAlreadyRunning:
return
self.areProcessAlreadyRunning = True
runner.dispatch_jobs()
def set_axes_limits(self, xmin, xmax, ymin, ymax, emin, emax):
"""
Used in multiprocessing for automatic limits of the 3 axes: x, y and
energy (caxis). It is meant only for the 1st ray tracing run.
"""
# if (self.xaxis.limits is None) or isinstance(self.xaxis.limits, str):
# the check is not needed: even if the limits have been already set, they may
# change due to *aspect*; this is checked in :mod:`multipro`.
self.xaxis.limits = [xmin, xmax]
self.yaxis.limits = [ymin, ymax]
self.caxis.limits = [emin, emax]
def draw_footprint_area(self):
"""
Useful with raycing backend for footprint images.
"""
if self.oe is None:
return
if self.oe.surface is None:
return
if isinstance(self.oe.surface, basestring):
surface = self.oe.surface,
else:
surface = self.oe.surface
if len(self.oeSurfaceLabels) > 0:
for isurf, surf in enumerate(surface):
self.oeSurfaceLabels[isurf].set_text(surf)
return
r = [0, 0, 0, 0]
if self.raycingParam == 2: # the second crystal of DCM
limsPhys = self.oe.limPhysX2, self.oe.limPhysY2
limsOpt = self.oe.limOptX2, self.oe.limOptY2
elif (self.raycingParam >= 1000) and hasattr(self.oe, "xStep"):
# all facets of a diced crystal
if self.oe.limPhysX[1] == np.inf:
return
if self.oe.limPhysY[1] == np.inf:
return
if self.xaxis.limits is None:
return
if self.yaxis.limits is None:
return
ixMin = int(round(max(self.oe.limPhysX[0], self.xaxis.limits[0]) /
self.oe.xStep))
ixMax = int(round(min(self.oe.limPhysX[1], self.xaxis.limits[1]) /
self.oe.xStep))
iyMin = int(round(max(self.oe.limPhysY[0], self.yaxis.limits[0]) /
self.oe.yStep))
iyMax = int(round(min(self.oe.limPhysY[1], self.yaxis.limits[1]) /
self.oe.yStep))
surface = []
limFacetXMin, limFacetXMax = [], []
limFacetYMin, limFacetYMax = [], []
for ix in range(ixMin, ixMax+1):
for iy in range(iyMin, iyMax+1):
surface.append('')
cx = ix * self.oe.xStep
cy = iy * self.oe.yStep
dxHalf = self.oe.dxFacet / 2
dyHalf = self.oe.dyFacet / 2
limFacetXMin.append(max(cx-dxHalf, self.oe.limPhysX[0]))
limFacetXMax.append(min(cx+dxHalf, self.oe.limPhysX[1]))
limFacetYMin.append(max(cy-dyHalf, self.oe.limPhysY[0]))
limFacetYMax.append(min(cy+dyHalf, self.oe.limPhysY[1]))
limsPhys = \
(limFacetXMin, limFacetXMax), (limFacetYMin, limFacetYMax)
limsOpt = None, None
else:
limsPhys = self.oe.limPhysX, self.oe.limPhysY
limsOpt = self.oe.limOptX, self.oe.limOptY
for isurf, surf in enumerate(surface):
for ilim1, ilim2, limPhys, limOpt in zip(
(0, 2), (1, 3), limsPhys, limsOpt):
if limOpt is not None:
if raycing.is_sequence(limOpt[0]):
r[ilim1], r[ilim2] = limOpt[0][isurf], limOpt[1][isurf]
else:
r[ilim1], r[ilim2] = limOpt[0], limOpt[1]
else:
if raycing.is_sequence(limPhys[0]):
r[ilim1], r[ilim2] = \
limPhys[0][isurf], limPhys[1][isurf]
else:
r[ilim1], r[ilim2] = limPhys[0], limPhys[1]
r[0] *= self.xaxis.factor
r[1] *= self.xaxis.factor
r[2] *= self.yaxis.factor
r[3] *= self.yaxis.factor
if isinstance(self.oe.shape, (str, unicode)):
if self.oe.shape.startswith('ro') and\
(self.raycingParam < 1000):
envelope = mpl.patches.Circle(
((r[1]+r[0])*0.5, (r[3]+r[2])*0.5), (r[1]-r[0])*0.5,
fc="#aaaaaa", lw=0, alpha=0.25)
elif self.oe.shape.startswith('rect') or\
(self.raycingParam >= 1000):
envelope = mpl.patches.Rectangle(
(r[0], r[2]), r[1] - r[0], r[3] - r[2],
fc="#aaaaaa", lw=0, alpha=0.25)
elif isinstance(self.oe.shape, list):
envelope = mpl.patches.Polygon(self.oe.shape, closed=True,
fc="#aaaaaa", lw=0, alpha=0.25)
self.ax2dHist.add_patch(envelope)
if self.raycingParam < 1000:
if self.yaxis.limits is not None:
yTextPos = max(r[2], self.yaxis.limits[0])
else:
yTextPos = r[2]
osl = self.ax2dHist.text(
(r[0]+r[1]) * 0.5, yTextPos, surf, ha='center',
va='top', color='w')
self.oeSurfaceLabels.append(osl)
def plot_hist1d(self, what_axis_char):
"""Plots the specified 1D histogram as imshow and calculates FWHM with
showing the ends of the FWHM bar.
Parameters:
*what_axis_char*: str [ 'x' | 'y' | 'c' ]
defines the axis
Returns:
*center*, *fwhm*: floats
the center and fwhm values for later displaying.
"""
if what_axis_char == 'x':
axis = self.xaxis
graph = self.ax1dHistX
orientation = 'horizontal'
histoPixelHeight = height1d
offsetText = self.ax1dHistXOffset
elif what_axis_char == 'y':
axis = self.yaxis
graph = self.ax1dHistY
orientation = 'vertical'
histoPixelHeight = height1d
offsetText = self.ax1dHistYOffset
elif what_axis_char == 'c':
axis = self.caxis
graph = self.ax1dHistE
if self.ePos == 1:
orientation = 'vertical'
elif self.ePos == 2:
orientation = 'horizontal'
offsetText = self.ax1dHistEOffset
histoPixelHeight = heightE1d
t1D = axis.total1D
axis.max1D = float(np.max(t1D))
if axis.max1D > epsHist:
if runner.runCardVals.passNo > 0:
mult = 1.0 / axis.globalMax1D
else:
mult = 1.0 / axis.max1D
xx = t1D * mult
else:
xx = t1D
if runner.runCardVals.passNo > 0:
xxMaxHalf = float(np.max(xx)) * 0.5 # for calculating FWHM
else:
xxMaxHalf = 0.5
t1D_RGB = axis.total1D_RGB
axis.max1D_RGB = float(np.max(t1D_RGB))
if axis.max1D_RGB > epsHist:
if runner.runCardVals.passNo > 1:
mult = 1.0 / axis.globalMax1D_RGB
else:
mult = 1.0 / axis.max1D_RGB
xxRGB = t1D_RGB * mult
else:
xxRGB = t1D_RGB
if orientation[0] == 'h':
map2d = np.zeros((histoPixelHeight, len(xx), 3))
for ix, cx in enumerate(xx):
maxPixel = int(round((histoPixelHeight-1) * cx))
if 0 <= maxPixel <= (histoPixelHeight-1):
map2d[0:maxPixel, ix, :] = xxRGB[ix, :]
if axis.outline:
maxRGB = np.max(xxRGB[ix, :])
if maxRGB > 1e-20:
scaleFactor = \
1 - axis.outline + axis.outline/maxRGB
map2d[maxPixel-1, ix, :] *= scaleFactor
extent = None
if (axis.limits is not None) and\
(not isinstance(axis.limits, str)):
ll = [l-axis.offset for l in axis.limits]
extent = [ll[0], ll[1], 0, 1]
elif orientation[0] == 'v':
map2d = np.zeros((len(xx), histoPixelHeight, 3))
for ix, cx in enumerate(xx):
maxPixel = int(round((histoPixelHeight-1) * cx))
if 0 <= maxPixel <= (histoPixelHeight-1):
map2d[ix, 0:maxPixel, :] = xxRGB[ix, :]
if axis.outline:
maxRGB = np.max(xxRGB[ix, :])
if maxRGB > 1e-20:
scaleFactor = \
1 - axis.outline + axis.outline/maxRGB
map2d[ix, maxPixel-1, :] *= scaleFactor
extent = None
if (axis.limits is not None) and \
not (isinstance(axis.limits, str)):
ll = [l-axis.offset for l in axis.limits]
extent = [0, 1, ll[0], ll[1]]
if self.negative:
map2d = 1 - map2d
if self.utilityInvertColorMap:
map2d = mpl.colors.rgb_to_hsv(map2d)
map2d[:, :, 0] -= 0.5
map2d[map2d < 0] += 1
map2d = mpl.colors.hsv_to_rgb(map2d)
graph.images[0].set_data(map2d)
if extent is not None:
graph.images[0].set_extent(extent)
del graph.lines[:] # otherwise it accumulates the FWHM lines
if axis.max1D > 0:
args = np.argwhere(xx >= xxMaxHalf)
iHistFWHMlow = np.min(args)
iHistFWHMhigh = np.max(args) + 1
histFWHMlow = axis.binEdges[iHistFWHMlow] - axis.offset
histFWHMhigh = axis.binEdges[iHistFWHMhigh] - axis.offset
if axis.fwhmFormatStr is not None:
if orientation[0] == 'h':
graph.plot([histFWHMlow, histFWHMhigh],
[xxMaxHalf, xxMaxHalf], '+', color='grey')
elif orientation[0] == 'v':
graph.plot([xxMaxHalf, xxMaxHalf],
[histFWHMlow, histFWHMhigh], '+', color='grey')
else:
histFWHMlow = 0
histFWHMhigh = 0
if axis.offset:
ll = [l-axis.offset for l in axis.limits]
offsetText.set_text('{0}{1:g} {2}'.format(
'+' if axis.offset > 0 else '',
axis.offset*axis.offsetDisplayFactor, axis.offsetDisplayUnit))
offsetText.set_visible(True)
else:
ll = axis.limits
offsetText.set_visible(False)
if orientation[0] == 'h':
if not isinstance(axis.limits, str):
graph.set_xlim(ll)
graph.set_ylim([0, 1])
elif orientation[0] == 'v':
graph.set_xlim([0, 1])
if not isinstance(axis.limits, str):
graph.set_ylim(ll)
axis.binCenters = (axis.binEdges[:-1]+axis.binEdges[1:]) * 0.5
weighted1D = axis.total1D * axis.binCenters
xxAve = axis.total1D.sum()
if xxAve != 0:
xxAve = weighted1D.sum() / xxAve
return xxAve, histFWHMhigh - histFWHMlow
def plot_colorbar(self):
"""
Plots a color bar adjacent to the caxis 1D histogram.
"""
a = np.linspace(0, colorFactor, self.caxis.pixels, endpoint=True)
a = np.asarray(a).reshape(1, -1)
if self.invertColorMap:
a -= 0.5
a[a < 0] += 1
if self.caxis.limits is None:
return
eMin, eMax = [l-self.caxis.offset for l in self.caxis.limits]
a = np.vstack((a, a))
if self.ePos == 1:
a = a.T
extent = [0, 1, eMin, eMax]
else:
extent = [eMin, eMax, 0, 1]
a = np.dstack(
(a, np.ones_like(a) * self.colorSaturation, np.ones_like(a)))
a = mpl.colors.hsv_to_rgb(a)
if self.negative:
a = 1 - a
self.ax1dHistEbar.images[0].set_data(a)
self.ax1dHistEbar.images[0].set_extent(extent)
if self.caxis.invertAxis:
if self.ePos == 2:
self.ax1dHistEbar.set_xlim(self.ax1dHistEbar.get_xlim()[::-1])
elif self.ePos == 1:
self.ax1dHistEbar.set_ylim(self.ax1dHistEbar.get_ylim()[::-1])
def plot_hist2d(self):
"""
Plots the 2D histogram as imshow.
"""
tRGB = self.total2D_RGB
self.max2D_RGB = float(np.max(tRGB))
if self.max2D_RGB > 0:
if runner.runCardVals.passNo > 1:
mult = 1.0 / self.globalMax2D_RGB
else:
mult = 1.0 / self.max2D_RGB
xyRGB = tRGB * mult
else:
xyRGB = tRGB
if self.negative:
xyRGB = 1 - xyRGB
if self.utilityInvertColorMap:
xyRGB = mpl.colors.rgb_to_hsv(xyRGB)
xyRGB[:, :, 0] -= 0.5
xyRGB[xyRGB < 0] += 1
xyRGB = mpl.colors.hsv_to_rgb(xyRGB)
xyRGB[xyRGB < 0] = 0
xyRGB[xyRGB > 1] = 1
# #test:
# xyRGB[:,:,:]=0
# xyRGB[1::2,1::2,0]=1
extent = None
if (self.xaxis.limits is not None) and (self.yaxis.limits is not None):
if (not isinstance(self.xaxis.limits, str)) and\
(not isinstance(self.yaxis.limits, str)):
extent = [self.xaxis.limits[0]-self.xaxis.offset,
self.xaxis.limits[1]-self.xaxis.offset,
self.yaxis.limits[0]-self.yaxis.offset,
self.yaxis.limits[1]-self.yaxis.offset]
self.ax2dHist.images[0].set_data(xyRGB)
if extent is not None:
self.ax2dHist.images[0].set_extent(extent)
if self.xaxis.invertAxis:
self.ax2dHist.set_xlim(self.ax2dHist.get_xlim()[::-1])
if self.yaxis.invertAxis:
self.ax2dHist.set_ylim(self.ax2dHist.get_ylim()[::-1])
if self.contourLevels is not None:
if self.contours2D is not None:
for c in self.contours2D.collections:
try:
self.ax2dHist.collections.remove(c)
except ValueError:
pass
self.ax2dHist.artists = []
dx = float(self.xaxis.limits[1]-self.xaxis.limits[0]) /\
self.xaxis.bins
dy = float(self.yaxis.limits[1]-self.yaxis.limits[0]) /\
self.yaxis.bins
if dx == 0:
dx = 1.
if dy == 0:
dy = 1.
x = np.linspace(
self.xaxis.limits[0] + dx/2, self.xaxis.limits[1] - dx/2,
self.xaxis.bins)
y = np.linspace(
self.yaxis.limits[0] + dy/2, self.yaxis.limits[1] - dy/2,
self.yaxis.bins)
X, Y = np.meshgrid(x, y)
norm = self.nRaysAll * dx * dy
if norm > 0:
Z = copy.copy(self.total2D) / norm
Z = sp.ndimage.filters.gaussian_filter(Z, 3, mode='nearest')\
* self.contourFactor
self.contourMax = np.max(Z)
if True: # self.contourMax > 1e-4:
contourLevels =\
[l*self.contourMax for l in self.contourLevels]
self.contours2D = self.ax2dHist.contour(
X, Y, Z, levels=contourLevels,
colors=self.contourColors)
self.ax2dHist.clabel(
self.contours2D, fmt=self.contourFmt, inline=True,
fontsize=10)
def textFWHM(self, axis, textD, average, hwhm):
"""Updates the text field that has average of the *axis* plus-minus the
HWHM value."""
deltaStr = axis.label + '$ = $' + axis.fwhmFormatStr +\
r'$\pm$' + axis.fwhmFormatStr + ' %s'
textD.set_text(deltaStr % (average, hwhm, axis.unit))
def _pow10(self, x, digits=1):
"""
Returns a string representation of the scientific notation of the given
number formatted for use with LaTeX or Mathtext, with specified number
of significant decimal digits.
"""
x = float(x)
if (x <= 0) or np.isnan(x).any():
return '0'
exponent = int(np.floor(np.log10(abs(x))))
coeff = np.round(x / float(10**exponent), digits)
return r"{0:.{2}f}$\cdot$10$^{{{1:d}}}$".format(
coeff, exponent, digits)
# def _round_to_n(self, x, n):
# """Round x to n significant figures"""
# return round(x, -int(np.floor(np.sign(x) * np.log10(abs(x)))) + n)
#
# def _str_fmt10(self, x, n=2):
# " Format x into nice Latex rounding to n"
# if x <= 0: return "0"
# try:
# power = int(np.log10(self._round_to_n(x, 0)))
# f_SF = self._round_to_n(x, n) * pow(10, -power)
# except OverflowError:
# return "0"
# return r"{0}$\cdot$10$^{{{1}}}$".format(f_SF, power)
def _get_flux(self):
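        # scale the accumulated intensity to source flux units (ph/s)
        # using the number of seeded rays and their total flux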
self.flux = float(self.intensity) / self.nRaysAll *\
self.nRaysSeededI / self.nRaysSeeded
def _get_power(self):
self.power = self.intensity / self.nRaysAll
def plot_plots(self):
"""
Does all graphics update.
"""
self.cx, self.dx = self.plot_hist1d('x')
self.cy, self.dy = self.plot_hist1d('y')
if self.ePos != 0:
self.cE, self.dE = self.plot_hist1d('c')
self.plot_colorbar()
if self.caxis.fwhmFormatStr is not None:
self.textFWHM(self.caxis, self.textDE, self.cE, self.dE/2)
self.plot_hist2d()
if self.textNrays:
self.textNrays.set_text(r'$N_{\rm all} = $%s' % self.nRaysAll)
if self.textGoodrays:
if (runner.runCardVals.backend == 'shadow'):
strDict = {0: r'lost', 1: r'good'}
self.textGoodrays.set_text(
''.join([r'$N_{\rm ', strDict[self.rayFlag[0]],
r'} = $%s']) % self.nRaysNeeded)
if self.textI:
if self.fluxFormatStr == 'auto':
cond = (self.fluxUnit is None) or \
self.fluxKind.startswith('power')
if (runner.runCardVals.backend == 'raycing'):
cond = cond or (self.nRaysSeeded == 0)
if cond:
fluxFormatStr = '%g'
else:
fluxFormatStr = '%.2p'
else:
fluxFormatStr = self.fluxFormatStr
isPowerOfTen = False
if fluxFormatStr.endswith('p'):
pos = fluxFormatStr.find('.')
if 0 < pos+1 < len(fluxFormatStr):
isPowerOfTen = True
powerOfTenDecN = int(fluxFormatStr[pos+1])
if (runner.runCardVals.backend == 'raycing'):
for iTextPanel, iEnergy, iN, substr in zip(
[self.textGood, self.textOut, self.textOver, self.textAlive,
self.textDead],
[raycing.hueGood, raycing.hueOut, raycing.hueOver, 0,
raycing.hueDead],
[self.nRaysGood, self.nRaysOut, self.nRaysOver,
self.nRaysAlive, self.nRaysDead],
['good', 'out', 'over', 'alive', 'dead']):
if iTextPanel is not None:
iTextPanel.set_text(''.join(
[r'$N_{\rm ', substr, r'} = $%s']) % iN)
if self.caxis.useCategory:
eMin, eMax = self.caxis.limits
if iEnergy == 0:
color = 'black'
else:
hue = (iEnergy-eMin) / (eMax-eMin) * colorFactor
# hue = iEnergy / 10.0 * colorFactor
color = np.dstack((hue, 1, 1))
color = \
mpl.colors.hsv_to_rgb(color)[0, :].reshape(3, )
iTextPanel.set_color(color)
if self.textI:
if (self.fluxUnit is None) or (self.nRaysSeeded == 0):
intensityStr = r'$\Phi = $'
if isPowerOfTen:
intensityStr += self._pow10(
self.intensity, powerOfTenDecN)
else:
intensityStr += fluxFormatStr % self.intensity
self.textI.set_text(intensityStr)
else:
if self.fluxKind.startswith('power'):
if self.nRaysAll > 0:
self._get_power()
if self.displayAsAbsorbedPower:
powerStr2 = r'P$_{\rm abs} = $'
else:
powerStr2 = r'P$_{\rm tot} = $'
powerStr = powerStr2 + fluxFormatStr + ' W'
self.textI.set_text(powerStr % self.power)
else:
if (self.nRaysAll > 0) and (self.nRaysSeeded > 0):
self._get_flux()
if isPowerOfTen:
intensityStr = self._pow10(
self.flux, powerOfTenDecN)
else:
intensityStr = fluxFormatStr % self.flux
intensityStr = \
r'$\Phi = ${0} ph/s'.format(intensityStr)
self.textI.set_text(intensityStr)
self.update_user_elements()
if (runner.runCardVals.backend == 'shadow'):
if self.textI:
intensityStr = r'$I = $'
if isPowerOfTen:
intensityStr += self._pow10(
self.intensity, powerOfTenDecN)
else:
intensityStr += fluxFormatStr % self.intensity
self.textI.set_text(intensityStr)
if self.xaxis.fwhmFormatStr is not None:
self.textFWHM(self.xaxis, self.textDx, self.cx, self.dx/2)
if self.yaxis.fwhmFormatStr is not None:
self.textFWHM(self.yaxis, self.textDy, self.cy, self.dy/2)
self.fig.canvas.draw()
def save(self, suffix=''):
"""
Saves matplotlib figures with the *suffix* appended to the file name(s)
in front of the extension.
"""
if self.saveName is None:
return
if isinstance(self.saveName, basestring):
fileList = [self.saveName, ]
else: # fileList is a sequence
fileList = self.saveName
for aName in fileList:
(fileBaseName, fileExtension) = os.path.splitext(aName)
saveName = ''.join([fileBaseName, suffix, fileExtension])
self.fig.savefig(saveName, dpi=self.dpi)
# otherwise mpl qt backend wants to change it (only in Windows):
self.fig.set_size_inches(self.local_size_inches)
self.fig.canvas.draw()
def clean_plots(self):
"""
Cleans the graph in order to prepare it for the next ray tracing.
"""
runner.runCardVals.iteration = 0
runner.runCardVals.stop_event.clear()
runner.runCardVals.finished_event.clear()
for axis in [self.xaxis, self.yaxis, self.caxis]:
axis.total1D[:] = np.zeros(axis.bins)
axis.total1D_RGB[:] = np.zeros((axis.bins, 3))
self.total2D[:] = np.zeros((self.yaxis.bins, self.xaxis.bins))
self.total2D_RGB[:] = np.zeros((self.yaxis.bins, self.xaxis.bins, 3))
if self.is4D:
if self.fluxKind.startswith('E'):
dtype = np.complex128
else:
dtype = np.float64
self.total4D[:] = np.zeros((self.size2D, self.size2D), dtype=dtype)
elif self.isPCA:
self.total4D = []
try:
self.fig.canvas.window().setWindowTitle(self.title)
except AttributeError:
pass
self.nRaysAll = np.long(0)
self.nRaysAllRestored = np.long(-1)
self.nRaysAccepted = np.long(0)
self.nRaysAcceptedE = 0.
self.nRaysSeeded = np.long(0)
self.nRaysSeededI = 0.
self.intensity = 0.
self.cidp = self.fig.canvas.mpl_connect(
'button_press_event', self.on_press)
self.fig.canvas.draw()
if self.ePos != 0:
if self.caxis.fwhmFormatStr is not None:
self.textDE.set_text('')
self.textNrays.set_text('')
if self.backend == 'shadow':
self.nRaysNeeded = np.long(0)
if self.textGoodrays is not None:
self.textGoodrays.set_text('')
if self.backend == 'raycing':
self.nRaysAlive = np.long(0)
self.nRaysGood = np.long(0)
self.nRaysOut = np.long(0)
self.nRaysOver = np.long(0)
self.nRaysDead = np.long(0)
if self.textGood is not None:
self.textGood.set_text('')
if self.textOut is not None:
self.textOut.set_text('')
if self.textOver is not None:
self.textOver.set_text('')
if self.textAlive is not None:
self.textAlive.set_text('')
if self.textDead is not None:
self.textDead.set_text('')
if self.textI:
self.textI.set_text('')
if self.xaxis.fwhmFormatStr is not None:
self.textDx.set_text('')
if self.yaxis.fwhmFormatStr is not None:
self.textDy.set_text('')
self.clean_user_elements()
if self.contours2D is not None:
self.contours2D.collections = []
self.ax2dHist.collections = []
self.plot_plots()
def set_negative(self):
"""
Utility function. Makes all plots in the graph negative (in color).
"""
self.negative = not self.negative
if self.negative:
            facecolor = 'w'  # previously axisbg (deprecated)
else:
facecolor = 'k'
axesList = [self.ax2dHist, self.ax1dHistX, self.ax1dHistY]
if self.ePos != 0:
axesList.append(self.ax1dHistE)
axesList.append(self.ax1dHistEbar)
for axes in axesList:
axes.set_axis_bgcolor(facecolor)
self.plot_plots()
def set_invert_colors(self):
"""
Utility function. Inverts the color map.
"""
self.invertColorMap = not self.invertColorMap # this variable is used
# at the time of handling the ray-tracing arrays, as it is cheaper
# there but needs an additional inversion at the time of plotting if
# requested by user.
self.utilityInvertColorMap = not self.utilityInvertColorMap # this
# variable is used at the time of plotting
self.plot_plots()
def card_copy(self):
"""
Returns a minimum set of properties (a "card") describing the plot.
Used for passing it to a new process or thread.
"""
return PlotCard2Pickle(self)
def store_plots(self):
"""
Pickles the accumulated arrays (histograms) and values (like flux) into
the binary file *persistentName*.
"""
saved = SaveResults(self)
if runner.runCardVals.globalNorm:
runner.runCardVals.savedResults.append(saved)
if self.persistentName and (self.nRaysAll > self.nRaysAllRestored):
if raycing.is_sequence(self.persistentName):
pn = self.persistentName[0]
else:
pn = self.persistentName
if pn.endswith('mat'):
import scipy.io as io
#if os.path.isfile(self.persistentName):
# os.remove(self.persistentName)
io.savemat(pn, vars(saved))
else:
f = open(pn, 'wb')
pickle.dump(saved, f, protocol=2)
f.close()
def restore_plots(self):
"""
Restores itself from a file, if possible.
"""
try:
if self.persistentName:
if raycing.is_sequence(self.persistentName):
pns = self.persistentName
else:
pns = self.persistentName,
for pn in pns:
if pn.endswith('mat'):
import scipy.io as io
saved_dic = {}
io.loadmat(pn, saved_dic)
saved = SaveResults(self)
saved.__dict__.update(saved_dic)
else:
pickleFile = open(pn, 'rb')
saved = pickle.load(pickleFile)
pickleFile.close()
saved.restore(self)
if True: # _DEBUG:
print('persistentName=', self.persistentName)
print('saved nRaysAll=', self.nRaysAll)
except (IOError, TypeError):
pass
class XYCPlotWithNumerOfReflections(XYCPlot):
def update_user_elements(self):
if not hasattr(self, 'ax1dHistE'):
return
if not hasattr(self, 'textUser'):
self.textUser = []
else:
self.ax1dHistE.texts[:] = [t for t in self.ax1dHistE.texts
if t not in self.textUser]
del self.textUser[:]
bins = self.caxis.total1D.nonzero()[0]
self.ax1dHistE.yaxis.set_major_locator(MaxNLocator(integer=True))
yPrev = -1e3
fontSize = 8
for i, b in enumerate(bins):
binVal = int(round(abs(
self.caxis.binEdges[b]+self.caxis.binEdges[b+1]) / 2))
textOut = ' n({0:.0f})={1:.1%}'.format(
binVal, self.caxis.total1D[b] / self.intensity)
y = self.caxis.binEdges[b+1] if i < (len(bins)-1) else\
self.caxis.binEdges[b]
tr = self.ax1dHistE.transData.transform
if abs(tr((0, y))[1] - tr((0, yPrev))[1]) < fontSize:
continue
yPrev = y
color = self.caxis.total1D_RGB[b] / max(self.caxis.total1D_RGB[b])
# va = 'bottom' if binVal < self.caxis.limits[1] else 'top'
va = 'bottom' if i < (len(bins) - 1) else 'top'
myText = self.ax1dHistE.text(
0, y, textOut, ha='left', va=va, size=fontSize, color=color)
self.textUser.append(myText)
def clean_user_elements(self):
if hasattr(self, 'textUser'):
self.ax1dHistE.texts[:] = [t for t in self.ax1dHistE.texts
if t not in self.textUser]
del self.textUser[:]
class PlotCard2Pickle(object):
"""
Container for a minimum set of properties (a "card") describing the plot.
Used for passing it to a new process or thread. Must be pickleable.
"""
def __init__(self, plot):
self.xaxis = plot.xaxis
self.yaxis = plot.yaxis
self.caxis = plot.caxis
self.aspect = plot.aspect
self.beam = plot.beam
self.beamState = plot.beamState
self.beamC = plot.beamC
self.rayFlag = plot.rayFlag
self.invertColorMap = plot.invertColorMap
self.ePos = plot.ePos
self.colorFactor = colorFactor
self.colorSaturation = colorSaturation
self.fluxKind = plot.fluxKind
self.title = plot.title
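# A minimal sketch of the intended use of the "card" (assumption: `plot` is
# an XYCPlot and `task_queue` is a hypothetical multiprocessing queue). The
# card holds only pickleable settings, so it can cross process boundaries
# where the full plot, which owns matplotlib objects, cannot:
#
#     card = plot.card_copy()  # equivalent to PlotCard2Pickle(plot)
#     task_queue.put(card)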
class SaveResults(object):
"""
Container for the accumulated arrays (histograms) and values (like flux)
for subsequent pickling/unpickling or for global flux normalization.
"""
def __init__(self, plot):
"""
Stores the arrays and values and finds the global histogram maxima.
"""
self.xtotal1D = copy.copy(plot.xaxis.total1D)
self.xtotal1D_RGB = copy.copy(plot.xaxis.total1D_RGB)
self.ytotal1D = copy.copy(plot.yaxis.total1D)
self.ytotal1D_RGB = copy.copy(plot.yaxis.total1D_RGB)
self.etotal1D = copy.copy(plot.caxis.total1D)
self.etotal1D_RGB = copy.copy(plot.caxis.total1D_RGB)
self.total2D = copy.copy(plot.total2D)
self.total2D_RGB = copy.copy(plot.total2D_RGB)
axes = [plot.xaxis, plot.yaxis]
if plot.ePos:
axes.append(plot.caxis)
self.cE, self.dE = copy.copy(plot.cE), copy.copy(plot.dE)
self.cx, self.dx = copy.copy(plot.cx), copy.copy(plot.dx)
self.cy, self.dy = copy.copy(plot.cy), copy.copy(plot.dy)
for axis in axes:
if axis.globalMax1D < axis.max1D:
axis.globalMax1D = axis.max1D
if axis.globalMax1D_RGB < axis.max1D_RGB:
axis.globalMax1D_RGB = axis.max1D_RGB
if plot.globalMax2D_RGB < plot.max2D_RGB:
plot.globalMax2D_RGB = plot.max2D_RGB
self.nRaysAll = copy.copy(plot.nRaysAll)
self.intensity = copy.copy(plot.intensity)
if plot.backend == 'shadow':
self.nRaysNeeded = copy.copy(plot.nRaysNeeded)
elif plot.backend == 'raycing':
self.nRaysAlive = copy.copy(plot.nRaysAlive)
self.nRaysGood = copy.copy(plot.nRaysGood)
self.nRaysOut = copy.copy(plot.nRaysOut)
self.nRaysOver = copy.copy(plot.nRaysOver)
self.nRaysDead = copy.copy(plot.nRaysDead)
if (plot.nRaysSeeded > 0):
self.nRaysAccepted = copy.copy(plot.nRaysAccepted)
self.nRaysAcceptedE = copy.copy(plot.nRaysAcceptedE)
self.nRaysSeeded = copy.copy(plot.nRaysSeeded)
self.nRaysSeededI = copy.copy(plot.nRaysSeededI)
self.flux = copy.copy(plot.flux)
self.power = copy.copy(plot.power)
self.xlimits = copy.copy(plot.xaxis.limits)
self.ylimits = copy.copy(plot.yaxis.limits)
self.elimits = copy.copy(plot.caxis.limits)
self.xbinEdges = copy.copy(plot.xaxis.binEdges)
self.ybinEdges = copy.copy(plot.yaxis.binEdges)
self.ebinEdges = copy.copy(plot.caxis.binEdges)
self.fluxKind = copy.copy(plot.fluxKind)
def restore(self, plot):
"""
Restores the arrays and values after unpickling or after running the
ray-tracing series and finding the global histogram maxima.
"""
        # np.squeeze is needed even for scalars, otherwise values loaded
        # from a .mat file come back as [[value]]
plot.xaxis.total1D += np.squeeze(self.xtotal1D)
plot.xaxis.total1D_RGB += np.squeeze(self.xtotal1D_RGB)
plot.yaxis.total1D += np.squeeze(self.ytotal1D)
plot.yaxis.total1D_RGB += np.squeeze(self.ytotal1D_RGB)
plot.caxis.total1D += np.squeeze(self.etotal1D)
plot.caxis.total1D_RGB += np.squeeze(self.etotal1D_RGB)
plot.total2D += np.squeeze(self.total2D)
plot.total2D_RGB += np.squeeze(self.total2D_RGB)
plot.nRaysAll += np.squeeze(self.nRaysAll)
plot.nRaysAllRestored += np.squeeze(self.nRaysAll)
plot.intensity += np.squeeze(self.intensity)
if plot.backend == 'shadow':
plot.nRaysNeeded += np.squeeze(self.nRaysNeeded)
elif plot.backend == 'raycing':
plot.nRaysAlive += np.squeeze(self.nRaysAlive)
plot.nRaysGood += np.squeeze(self.nRaysGood)
plot.nRaysOut += np.squeeze(self.nRaysOut)
plot.nRaysOver += np.squeeze(self.nRaysOver)
plot.nRaysDead += np.squeeze(self.nRaysDead)
if hasattr(self, 'nRaysSeeded'):
if self.nRaysSeeded > 0:
plot.nRaysAccepted += np.squeeze(self.nRaysAccepted)
plot.nRaysAcceptedE += np.squeeze(self.nRaysAcceptedE)
plot.nRaysSeeded += np.squeeze(self.nRaysSeeded)
plot.nRaysSeededI += np.squeeze(self.nRaysSeededI)
plot.xaxis.limits = np.copy(np.squeeze(self.xlimits))
plot.yaxis.limits = np.copy(np.squeeze(self.ylimits))
plot.caxis.limits = np.copy(np.squeeze(self.elimits))
plot.xaxis.binEdges = np.copy(np.squeeze(self.xbinEdges))
plot.yaxis.binEdges = np.copy(np.squeeze(self.ybinEdges))
plot.caxis.binEdges = np.copy(np.squeeze(self.ebinEdges))
plot.fluxKind = np.array_str(np.copy(np.squeeze(self.fluxKind)))
# def __getstate__(self):
# odict = self.__dict__.copy() # copy the dict since we change it
# del odict['plot'] # remove plot reference, it cannot be pickled
# return odict
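# A minimal round-trip sketch for SaveResults (assumption: `plot` is a
# finished XYCPlot). This mirrors what store_plots() and restore_plots() do
# with a pickle-type persistentName:
#
#     saved = SaveResults(plot)
#     with open('screen01.pickle', 'wb') as f:
#         pickle.dump(saved, f, protocol=2)
#     # ... later, possibly in another session ...
#     with open('screen01.pickle', 'rb') as f:
#         pickle.load(f).restore(plot)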
| mit |